From 635d8e55179780606164db84d26cb6fe8db4e303 Mon Sep 17 00:00:00 2001 From: Antonio Murdaca Date: Sat, 23 Feb 2019 13:59:40 +0100 Subject: [PATCH 1/3] controllers: refactor code and start informers first This patch does various things, all related: - start the informers for controllers before running them to follow what https://github.com/kubernetes/sample-controller/blob/master/main.go#L64-L71 does and to avoid races already spotted in unit tests (https://github.com/openshift/machine-config-operator/pull/457) - move the clients builder code from cmd/common to a new package just for that under the new internal/ folder so nobody but us can use that - move common controllers code under pkg/controller/common to be reused - create an e2e only clientset, avoiding the need to type the client version every time (eg client.CoreV1()..). This is needed because if the api changes in the future, we don't have to play grep for a week replacing old apis... Signed-off-by: Antonio Murdaca --- cmd/common/helpers.go | 13 +- cmd/machine-config-controller/start.go | 113 +++++++++--------- cmd/machine-config-daemon/start.go | 9 +- cmd/machine-config-operator/start.go | 61 +++++----- .../clients/builder.go | 23 ++-- pkg/controller/common/controller.go | 6 + .../controller}/common/controller_context.go | 23 +++- test/e2e/framework/clientset.go | 52 ++++++++ test/e2e/mcd_test.go | 60 ++++------ test/e2e/mco_test.go | 11 +- test/e2e/osimageurl_test.go | 17 +-- test/e2e/sanity_test.go | 11 +- 12 files changed, 217 insertions(+), 182 deletions(-) rename cmd/common/client_builder.go => internal/clients/builder.go (72%) create mode 100644 pkg/controller/common/controller.go rename {cmd => pkg/controller}/common/controller_context.go (80%) create mode 100644 test/e2e/framework/clientset.go diff --git a/cmd/common/helpers.go b/cmd/common/helpers.go index 0653092f8e..d466fa526b 100644 --- a/cmd/common/helpers.go +++ b/cmd/common/helpers.go @@ -1,10 +1,10 @@ package common import ( - "math/rand" "os" "time" + 
"github.com/openshift/machine-config-operator/internal/clients" "github.com/golang/glog" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -21,19 +21,10 @@ const ( RenewDeadline = 60 * time.Second // RetryPeriod is the default duration for the leader electrion retrial. RetryPeriod = 30 * time.Second - - minResyncPeriod = 20 * time.Minute ) -func resyncPeriod() func() time.Duration { - return func() time.Duration { - factor := rand.Float64() + 1 - return time.Duration(float64(minResyncPeriod.Nanoseconds()) * factor) - } -} - // CreateResourceLock returns an interface for the resource lock. -func CreateResourceLock(cb *ClientBuilder, componentNamespace, componentName string) resourcelock.Interface { +func CreateResourceLock(cb *clients.Builder, componentNamespace, componentName string) resourcelock.Interface { recorder := record. NewBroadcaster(). NewRecorder(runtime.NewScheme(), v1.EventSource{Component: componentName}) diff --git a/cmd/machine-config-controller/start.go b/cmd/machine-config-controller/start.go index 1323afff22..11263b451e 100644 --- a/cmd/machine-config-controller/start.go +++ b/cmd/machine-config-controller/start.go @@ -6,6 +6,8 @@ import ( "github.com/golang/glog" "github.com/openshift/machine-config-operator/cmd/common" + "github.com/openshift/machine-config-operator/internal/clients" + controllercommon "github.com/openshift/machine-config-operator/pkg/controller/common" "github.com/openshift/machine-config-operator/pkg/controller/container-runtime-config" "github.com/openshift/machine-config-operator/pkg/controller/kubelet-config" "github.com/openshift/machine-config-operator/pkg/controller/node" @@ -46,15 +48,14 @@ func runStartCmd(cmd *cobra.Command, args []string) { // To help debugging, immediately log version glog.Infof("Version: %+v", version.Version) - cb, err := common.NewClientBuilder(startOpts.kubeconfig) + cb, err := clients.NewBuilder(startOpts.kubeconfig) if err != nil { glog.Fatalf("error creating clients: %v", 
err) } run := func(ctx context.Context) { - ctrlctx := common.CreateControllerContext(cb, ctx.Done(), componentName) - if err := startControllers(ctrlctx); err != nil { - glog.Fatalf("error starting controllers: %v", err) - } + ctrlctx := controllercommon.CreateControllerContext(cb, ctx.Done(), componentName) + + controllers := createControllers(ctrlctx) // Start the shared factory informers that you need to use in your controller ctrlctx.InformerFactory.Start(ctrlctx.Stop) @@ -62,6 +63,10 @@ func runStartCmd(cmd *cobra.Command, args []string) { ctrlctx.ConfigInformerFactory.Start(ctrlctx.Stop) close(ctrlctx.InformersStarted) + for _, c := range controllers { + go c.Run(2, ctrlctx.Stop) + } + select {} } @@ -80,53 +85,53 @@ func runStartCmd(cmd *cobra.Command, args []string) { panic("unreachable") } -func startControllers(ctx *common.ControllerContext) error { - // Our primary MCs come from here - go template.New( - rootOpts.templates, - ctx.InformerFactory.Machineconfiguration().V1().ControllerConfigs(), - ctx.InformerFactory.Machineconfiguration().V1().MachineConfigs(), - ctx.ClientBuilder.KubeClientOrDie("template-controller"), - ctx.ClientBuilder.MachineConfigClientOrDie("template-controller"), - ).Run(2, ctx.Stop) - - // Add all "sub-renderers here" - go kubeletconfig.New( - rootOpts.templates, - ctx.InformerFactory.Machineconfiguration().V1().MachineConfigPools(), - ctx.InformerFactory.Machineconfiguration().V1().ControllerConfigs(), - ctx.InformerFactory.Machineconfiguration().V1().KubeletConfigs(), - ctx.ClientBuilder.KubeClientOrDie("kubelet-config-controller"), - ctx.ClientBuilder.MachineConfigClientOrDie("kubelet-config-controller"), - ).Run(2, ctx.Stop) - - go containerruntimeconfig.New( - rootOpts.templates, - ctx.InformerFactory.Machineconfiguration().V1().MachineConfigPools(), - ctx.InformerFactory.Machineconfiguration().V1().ControllerConfigs(), - ctx.InformerFactory.Machineconfiguration().V1().ContainerRuntimeConfigs(), - 
ctx.ConfigInformerFactory.Config().V1().Images(), - ctx.ClientBuilder.KubeClientOrDie("container-runtime-config-controller"), - ctx.ClientBuilder.MachineConfigClientOrDie("container-runtime-config-controller"), - ctx.ClientBuilder.ConfigClientOrDie("container-runtime-config-controller"), - ).Run(2, ctx.Stop) - - // The renderer creates "rendered" MCs from the MC fragments generated by - // the above sub-controllers, which are then consumed by the node controller - go render.New( - ctx.InformerFactory.Machineconfiguration().V1().MachineConfigPools(), - ctx.InformerFactory.Machineconfiguration().V1().MachineConfigs(), - ctx.ClientBuilder.KubeClientOrDie("render-controller"), - ctx.ClientBuilder.MachineConfigClientOrDie("render-controller"), - ).Run(2, ctx.Stop) - - // The node controller consumes data written by the above - go node.New( - ctx.InformerFactory.Machineconfiguration().V1().MachineConfigPools(), - ctx.KubeInformerFactory.Core().V1().Nodes(), - ctx.ClientBuilder.KubeClientOrDie("node-update-controller"), - ctx.ClientBuilder.MachineConfigClientOrDie("node-update-controller"), - ).Run(2, ctx.Stop) - - return nil +func createControllers(ctx *controllercommon.ControllerContext) []controllercommon.Controller { + var controllers []controllercommon.Controller + + controllers = append(controllers, + // Our primary MCs come from here + template.New( + rootOpts.templates, + ctx.InformerFactory.Machineconfiguration().V1().ControllerConfigs(), + ctx.InformerFactory.Machineconfiguration().V1().MachineConfigs(), + ctx.ClientBuilder.KubeClientOrDie("template-controller"), + ctx.ClientBuilder.MachineConfigClientOrDie("template-controller"), + ), + // Add all "sub-renderers here" + kubeletconfig.New( + rootOpts.templates, + ctx.InformerFactory.Machineconfiguration().V1().MachineConfigPools(), + ctx.InformerFactory.Machineconfiguration().V1().ControllerConfigs(), + ctx.InformerFactory.Machineconfiguration().V1().KubeletConfigs(), + 
ctx.ClientBuilder.KubeClientOrDie("kubelet-config-controller"), + ctx.ClientBuilder.MachineConfigClientOrDie("kubelet-config-controller"), + ), + containerruntimeconfig.New( + rootOpts.templates, + ctx.InformerFactory.Machineconfiguration().V1().MachineConfigPools(), + ctx.InformerFactory.Machineconfiguration().V1().ControllerConfigs(), + ctx.InformerFactory.Machineconfiguration().V1().ContainerRuntimeConfigs(), + ctx.ConfigInformerFactory.Config().V1().Images(), + ctx.ClientBuilder.KubeClientOrDie("container-runtime-config-controller"), + ctx.ClientBuilder.MachineConfigClientOrDie("container-runtime-config-controller"), + ctx.ClientBuilder.ConfigClientOrDie("container-runtime-config-controller"), + ), + // The renderer creates "rendered" MCs from the MC fragments generated by + // the above sub-controllers, which are then consumed by the node controller + render.New( + ctx.InformerFactory.Machineconfiguration().V1().MachineConfigPools(), + ctx.InformerFactory.Machineconfiguration().V1().MachineConfigs(), + ctx.ClientBuilder.KubeClientOrDie("render-controller"), + ctx.ClientBuilder.MachineConfigClientOrDie("render-controller"), + ), + // The node controller consumes data written by the above + node.New( + ctx.InformerFactory.Machineconfiguration().V1().MachineConfigPools(), + ctx.KubeInformerFactory.Core().V1().Nodes(), + ctx.ClientBuilder.KubeClientOrDie("node-update-controller"), + ctx.ClientBuilder.MachineConfigClientOrDie("node-update-controller"), + ), + ) + + return controllers } diff --git a/cmd/machine-config-daemon/start.go b/cmd/machine-config-daemon/start.go index 37b1d195ff..a4c56a0dde 100644 --- a/cmd/machine-config-daemon/start.go +++ b/cmd/machine-config-daemon/start.go @@ -6,7 +6,8 @@ import ( "syscall" "github.com/golang/glog" - "github.com/openshift/machine-config-operator/cmd/common" + "github.com/openshift/machine-config-operator/internal/clients" + controllercommon "github.com/openshift/machine-config-operator/pkg/controller/common" 
"github.com/openshift/machine-config-operator/pkg/daemon" "github.com/openshift/machine-config-operator/pkg/version" "github.com/spf13/cobra" @@ -82,7 +83,7 @@ func runStartCmd(cmd *cobra.Command, args []string) { defer close(exitCh) var dn *daemon.Daemon - var ctx *common.ControllerContext + var ctx *controllercommon.ControllerContext glog.Info("starting node writer") nodeWriter := daemon.NewNodeWriter() @@ -108,11 +109,11 @@ func runStartCmd(cmd *cobra.Command, args []string) { } // Else we use the cluster driven daemon } else { - cb, err := common.NewClientBuilder(startOpts.kubeconfig) + cb, err := clients.NewBuilder(startOpts.kubeconfig) if err != nil { glog.Fatalf("failed to initialize ClientBuilder: %v", err) } - ctx = common.CreateControllerContext(cb, stopCh, componentName) + ctx = controllercommon.CreateControllerContext(cb, stopCh, componentName) // create the daemon instance. this also initializes kube client items // which need to come from the container and not the chroot. dn, err = daemon.NewClusterDrivenDaemon( diff --git a/cmd/machine-config-operator/start.go b/cmd/machine-config-operator/start.go index a74d5e3025..4ff9b55a3a 100644 --- a/cmd/machine-config-operator/start.go +++ b/cmd/machine-config-operator/start.go @@ -6,6 +6,8 @@ import ( "github.com/golang/glog" "github.com/openshift/machine-config-operator/cmd/common" + "github.com/openshift/machine-config-operator/internal/clients" + controllercommon "github.com/openshift/machine-config-operator/pkg/controller/common" "github.com/openshift/machine-config-operator/pkg/operator" "github.com/openshift/machine-config-operator/pkg/version" "github.com/spf13/cobra" @@ -43,15 +45,35 @@ func runStartCmd(cmd *cobra.Command, args []string) { glog.Fatal("--images-json cannot be empty") } - cb, err := common.NewClientBuilder(startOpts.kubeconfig) + cb, err := clients.NewBuilder(startOpts.kubeconfig) if err != nil { glog.Fatalf("error creating clients: %v", err) } run := func(ctx context.Context) { - 
ctrlctx := common.CreateControllerContext(cb, ctx.Done(), componentNamespace) - if err := startControllers(ctrlctx); err != nil { - glog.Fatalf("error starting controllers: %v", err) - } + ctrlctx := controllercommon.CreateControllerContext(cb, ctx.Done(), componentNamespace) + + controller := operator.New( + componentNamespace, componentName, + startOpts.imagesFile, + ctrlctx.NamespacedInformerFactory.Machineconfiguration().V1().MCOConfigs(), + ctrlctx.NamespacedInformerFactory.Machineconfiguration().V1().MachineConfigPools(), + ctrlctx.NamespacedInformerFactory.Machineconfiguration().V1().ControllerConfigs(), + ctrlctx.NamespacedInformerFactory.Machineconfiguration().V1().MachineConfigs(), + ctrlctx.NamespacedInformerFactory.Machineconfiguration().V1().ControllerConfigs(), + ctrlctx.KubeNamespacedInformerFactory.Core().V1().ServiceAccounts(), + ctrlctx.APIExtInformerFactory.Apiextensions().V1beta1().CustomResourceDefinitions(), + ctrlctx.KubeNamespacedInformerFactory.Apps().V1().Deployments(), + ctrlctx.KubeNamespacedInformerFactory.Apps().V1().DaemonSets(), + ctrlctx.KubeNamespacedInformerFactory.Rbac().V1().ClusterRoles(), + ctrlctx.KubeNamespacedInformerFactory.Rbac().V1().ClusterRoleBindings(), + ctrlctx.KubeNamespacedInformerFactory.Core().V1().ConfigMaps(), + ctrlctx.ConfigInformerFactory.Config().V1().Infrastructures(), + ctrlctx.ConfigInformerFactory.Config().V1().Networks(), + ctrlctx.ClientBuilder.MachineConfigClientOrDie(componentName), + ctrlctx.ClientBuilder.KubeClientOrDie(componentName), + ctrlctx.ClientBuilder.APIExtClientOrDie(componentName), + ctrlctx.ClientBuilder.ConfigClientOrDie(componentName), + ) ctrlctx.NamespacedInformerFactory.Start(ctrlctx.Stop) ctrlctx.KubeInformerFactory.Start(ctrlctx.Stop) @@ -60,6 +82,8 @@ func runStartCmd(cmd *cobra.Command, args []string) { ctrlctx.ConfigInformerFactory.Start(ctrlctx.Stop) close(ctrlctx.InformersStarted) + go controller.Run(2, ctrlctx.Stop) + select {} } @@ -77,30 +101,3 @@ func runStartCmd(cmd 
*cobra.Command, args []string) { }) panic("unreachable") } - -func startControllers(ctx *common.ControllerContext) error { - go operator.New( - componentNamespace, componentName, - startOpts.imagesFile, - ctx.NamespacedInformerFactory.Machineconfiguration().V1().MCOConfigs(), - ctx.NamespacedInformerFactory.Machineconfiguration().V1().MachineConfigPools(), - ctx.NamespacedInformerFactory.Machineconfiguration().V1().ControllerConfigs(), - ctx.NamespacedInformerFactory.Machineconfiguration().V1().MachineConfigs(), - ctx.NamespacedInformerFactory.Machineconfiguration().V1().ControllerConfigs(), - ctx.KubeNamespacedInformerFactory.Core().V1().ServiceAccounts(), - ctx.APIExtInformerFactory.Apiextensions().V1beta1().CustomResourceDefinitions(), - ctx.KubeNamespacedInformerFactory.Apps().V1().Deployments(), - ctx.KubeNamespacedInformerFactory.Apps().V1().DaemonSets(), - ctx.KubeNamespacedInformerFactory.Rbac().V1().ClusterRoles(), - ctx.KubeNamespacedInformerFactory.Rbac().V1().ClusterRoleBindings(), - ctx.KubeNamespacedInformerFactory.Core().V1().ConfigMaps(), - ctx.ConfigInformerFactory.Config().V1().Infrastructures(), - ctx.ConfigInformerFactory.Config().V1().Networks(), - ctx.ClientBuilder.MachineConfigClientOrDie(componentName), - ctx.ClientBuilder.KubeClientOrDie(componentName), - ctx.ClientBuilder.APIExtClientOrDie(componentName), - ctx.ClientBuilder.ConfigClientOrDie(componentName), - ).Run(2, ctx.Stop) - - return nil -} diff --git a/cmd/common/client_builder.go b/internal/clients/builder.go similarity index 72% rename from cmd/common/client_builder.go rename to internal/clients/builder.go index c990eb7bc0..914debd1e3 100644 --- a/cmd/common/client_builder.go +++ b/internal/clients/builder.go @@ -1,47 +1,46 @@ -package common +package clients import ( "os" "github.com/golang/glog" configclientset "github.com/openshift/client-go/config/clientset/versioned" + mcfgclientset "github.com/openshift/machine-config-operator/pkg/generated/clientset/versioned" apiext 
"k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" - - mcfgclientset "github.com/openshift/machine-config-operator/pkg/generated/clientset/versioned" ) -// ClientBuilder can create a variety of kubernetes client interface +// Builder can create a variety of kubernetes client interface // with its embeded rest.Config. -type ClientBuilder struct { +type Builder struct { config *rest.Config } // MachineConfigClientOrDie returns the kubernetes client interface for machine config. -func (cb *ClientBuilder) MachineConfigClientOrDie(name string) mcfgclientset.Interface { +func (cb *Builder) MachineConfigClientOrDie(name string) mcfgclientset.Interface { return mcfgclientset.NewForConfigOrDie(rest.AddUserAgent(cb.config, name)) } // KubeClientOrDie returns the kubernetes client interface for general kubernetes objects. -func (cb *ClientBuilder) KubeClientOrDie(name string) kubernetes.Interface { +func (cb *Builder) KubeClientOrDie(name string) kubernetes.Interface { return kubernetes.NewForConfigOrDie(rest.AddUserAgent(cb.config, name)) } // ConfigClientOrDie returns the kubernetes client interface for security related kubernetes objects // such as pod security policy, security context. -func (cb *ClientBuilder) ConfigClientOrDie(name string) configclientset.Interface { +func (cb *Builder) ConfigClientOrDie(name string) configclientset.Interface { return configclientset.NewForConfigOrDie(rest.AddUserAgent(cb.config, name)) } // APIExtClientOrDie returns the kubernetes client interface for extended kubernetes objects. -func (cb *ClientBuilder) APIExtClientOrDie(name string) apiext.Interface { +func (cb *Builder) APIExtClientOrDie(name string) apiext.Interface { return apiext.NewForConfigOrDie(rest.AddUserAgent(cb.config, name)) } -// NewClientBuilder returns a *ClientBuilder with the given kubeconfig. 
-func NewClientBuilder(kubeconfig string) (*ClientBuilder, error) { +// NewBuilder returns a *Builder with the given kubeconfig. +func NewBuilder(kubeconfig string) (*Builder, error) { var config *rest.Config var err error @@ -60,7 +59,7 @@ func NewClientBuilder(kubeconfig string) (*ClientBuilder, error) { return nil, err } - return &ClientBuilder{ + return &Builder{ config: config, }, nil } diff --git a/pkg/controller/common/controller.go b/pkg/controller/common/controller.go new file mode 100644 index 0000000000..9daa0d486b --- /dev/null +++ b/pkg/controller/common/controller.go @@ -0,0 +1,6 @@ +package common + +// Controller is the common interface all controllers implement +type Controller interface { + Run(workers int, stopCh <-chan struct{}) +} \ No newline at end of file diff --git a/cmd/common/controller_context.go b/pkg/controller/common/controller_context.go similarity index 80% rename from cmd/common/controller_context.go rename to pkg/controller/common/controller_context.go index dbd590632f..358030f62e 100644 --- a/cmd/common/controller_context.go +++ b/pkg/controller/common/controller_context.go @@ -1,18 +1,31 @@ package common import ( + "math/rand" "time" configinformers "github.com/openshift/client-go/config/informers/externalversions" + "github.com/openshift/machine-config-operator/internal/clients" mcfginformers "github.com/openshift/machine-config-operator/pkg/generated/informers/externalversions" apiextinformers "k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/informers" ) +const ( + minResyncPeriod = 20 * time.Minute +) + +func resyncPeriod() func() time.Duration { + return func() time.Duration { + factor := rand.Float64() + 1 + return time.Duration(float64(minResyncPeriod.Nanoseconds()) * factor) + } +} + // ControllerContext stores all the informers for a variety of kubernetes objects. 
type ControllerContext struct { - ClientBuilder *ClientBuilder + ClientBuilder *clients.Builder NamespacedInformerFactory mcfginformers.SharedInformerFactory InformerFactory mcfginformers.SharedInformerFactory @@ -31,7 +44,7 @@ type ControllerContext struct { } // CreateControllerContext creates the ControllerContext with the ClientBuilder. -func CreateControllerContext(cb *ClientBuilder, stop <-chan struct{}, targetNamespace string) *ControllerContext { +func CreateControllerContext(cb *clients.Builder, stop <-chan struct{}, targetNamespace string) *ControllerContext { client := cb.MachineConfigClientOrDie("machine-config-shared-informer") kubeClient := cb.KubeClientOrDie("kube-shared-informer") apiExtClient := cb.APIExtClientOrDie("apiext-shared-informer") @@ -51,8 +64,8 @@ func CreateControllerContext(cb *ClientBuilder, stop <-chan struct{}, targetName KubeNamespacedInformerFactory: kubeNamespacedSharedInformer, APIExtInformerFactory: apiExtSharedInformer, ConfigInformerFactory: configSharedInformer, - Stop: stop, - InformersStarted: make(chan struct{}), - ResyncPeriod: resyncPeriod(), + Stop: stop, + InformersStarted: make(chan struct{}), + ResyncPeriod: resyncPeriod(), } } diff --git a/test/e2e/framework/clientset.go b/test/e2e/framework/clientset.go new file mode 100644 index 0000000000..86c1791189 --- /dev/null +++ b/test/e2e/framework/clientset.go @@ -0,0 +1,52 @@ +package framework + +import ( + "os" + + "github.com/golang/glog" + clientconfigv1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1" + clientmachineconfigv1 "github.com/openshift/machine-config-operator/pkg/generated/clientset/versioned/typed/machineconfiguration.openshift.io/v1" + clientapiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1" + clientcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" + clientappsv1 "k8s.io/client-go/kubernetes/typed/apps/v1" + "k8s.io/client-go/rest" + 
"k8s.io/client-go/tools/clientcmd" ) + +type ClientSet struct { + clientcorev1.CoreV1Interface + clientappsv1.AppsV1Interface + clientconfigv1.ConfigV1Interface + clientmachineconfigv1.MachineconfigurationV1Interface + clientapiextensionsv1beta1.ApiextensionsV1beta1Interface +} + +// NewClientSet returns a *ClientSet with the given kubeconfig. +func NewClientSet(kubeconfig string) *ClientSet { + var config *rest.Config + var err error + + if kubeconfig == "" { + kubeconfig = os.Getenv("KUBECONFIG") + } + + if kubeconfig != "" { + glog.V(4).Infof("Loading kube client config from path %q", kubeconfig) + config, err = clientcmd.BuildConfigFromFlags("", kubeconfig) + } else { + glog.V(4).Infof("Using in-cluster kube client config") + config, err = rest.InClusterConfig() + } + if err != nil { + panic(err) + } + + clientSet := &ClientSet{} + clientSet.CoreV1Interface = clientcorev1.NewForConfigOrDie(config) + clientSet.ConfigV1Interface = clientconfigv1.NewForConfigOrDie(config) + clientSet.MachineconfigurationV1Interface = clientmachineconfigv1.NewForConfigOrDie(config) + clientSet.ApiextensionsV1beta1Interface = clientapiextensionsv1beta1.NewForConfigOrDie(config) + clientSet.AppsV1Interface = clientappsv1.NewForConfigOrDie(config) + + return clientSet +} diff --git a/test/e2e/mcd_test.go b/test/e2e/mcd_test.go index 0dc05f2f28..5127eea8a7 100644 --- a/test/e2e/mcd_test.go +++ b/test/e2e/mcd_test.go @@ -7,37 +7,31 @@ import ( "time" ignv2_2types "github.com/coreos/ignition/config/v2_2/types" + mcv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" + "github.com/openshift/machine-config-operator/pkg/daemon/constants" + "github.com/openshift/machine-config-operator/test/e2e/framework" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/kubernetes" - - 
"github.com/openshift/machine-config-operator/cmd/common" - mcv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" - "github.com/openshift/machine-config-operator/pkg/daemon/constants" ) // Test case for https://github.com/openshift/machine-config-operator/issues/358 func TestMCDToken(t *testing.T) { - cb, err := common.NewClientBuilder("") - if err != nil { - t.Errorf("%#v", err) - } - k := cb.KubeClientOrDie("mcd-token-test") + cs := framework.NewClientSet("") listOptions := metav1.ListOptions{ LabelSelector: labels.SelectorFromSet(labels.Set{"k8s-app": "machine-config-daemon"}).String(), } - mcdList, err := k.CoreV1().Pods("openshift-machine-config-operator").List(listOptions) + mcdList, err := cs.Pods("openshift-machine-config-operator").List(listOptions) if err != nil { t.Fatalf("%#v", err) } for _, pod := range mcdList.Items { - res, err := k.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &v1.PodLogOptions{}).DoRaw() + res, err := cs.Pods(pod.Namespace).GetLogs(pod.Name, &v1.PodLogOptions{}).DoRaw() if err != nil { t.Errorf("%s", err) } @@ -95,18 +89,13 @@ func createMCToAddFile(name, filename, data, fs string) *mcv1.MachineConfig { } func TestMCDeployed(t *testing.T) { - cb, err := common.NewClientBuilder("") - if err != nil { - t.Errorf("%#v", err) - } - mcClient := cb.MachineConfigClientOrDie("mc-file-add") - k := cb.KubeClientOrDie("mc-file-add") + cs := framework.NewClientSet("") for i := 0; i < 10; i++ { mcadd := createMCToAddFile("add-a-file", fmt.Sprintf("/etc/mytestconf%d", i), "test", "root") // create the dummy MC now - _, err = mcClient.MachineconfigurationV1().MachineConfigs().Create(mcadd) + _, err := cs.MachineConfigs().Create(mcadd) if err != nil { t.Errorf("failed to create machine config %v", err) } @@ -114,7 +103,7 @@ func TestMCDeployed(t *testing.T) { // grab the latest worker- MC var newMCName string if err := wait.PollImmediate(2*time.Second, 5*time.Minute, func() (bool, error) { - mcp, err := 
mcClient.MachineconfigurationV1().MachineConfigPools().Get("worker", metav1.GetOptions{}) + mcp, err := cs.MachineConfigPools().Get("worker", metav1.GetOptions{}) if err != nil { return false, err } @@ -131,7 +120,7 @@ func TestMCDeployed(t *testing.T) { visited := make(map[string]bool) if err := wait.Poll(2*time.Second, 5*time.Minute, func() (bool, error) { - nodes, err := getNodesByRole(k, "worker") + nodes, err := getNodesByRole(cs, "worker") if err != nil { return false, err } @@ -154,11 +143,11 @@ func TestMCDeployed(t *testing.T) { } } -func getNodesByRole(k kubernetes.Interface, role string) ([]v1.Node, error) { +func getNodesByRole(cs *framework.ClientSet, role string) ([]v1.Node, error) { listOptions := metav1.ListOptions{ LabelSelector: labels.SelectorFromSet(labels.Set{fmt.Sprintf("node-role.kubernetes.io/%s", role): ""}).String(), } - nodes, err := k.CoreV1().Nodes().List(listOptions) + nodes, err := cs.Nodes().List(listOptions) if err != nil { return nil, err } @@ -166,12 +155,7 @@ func getNodesByRole(k kubernetes.Interface, role string) ([]v1.Node, error) { } func TestReconcileAfterBadMC(t *testing.T) { - cb, err := common.NewClientBuilder("") - if err != nil { - t.Errorf("%#v", err) - } - mcClient := cb.MachineConfigClientOrDie("mc-file-add") - k := cb.KubeClientOrDie("mc-file-add") + cs := framework.NewClientSet("") // create a bad MC w/o a filesystem field which is going to fail reconciling mcadd := createMCToAddFile("add-a-file", "/etc/mytestconfs", "test", "") @@ -179,14 +163,14 @@ func TestReconcileAfterBadMC(t *testing.T) { // grab the initial machineconfig used by the worker pool // this MC is gonna be the one which is going to be reapplied once the bad MC is deleted // and we need it for the final check - mcp, err := mcClient.MachineconfigurationV1().MachineConfigPools().Get("worker", metav1.GetOptions{}) + mcp, err := cs.MachineConfigPools().Get("worker", metav1.GetOptions{}) if err != nil { t.Error(err) } workerOldMc := 
mcp.Status.Configuration.Name // create the dummy MC now - _, err = mcClient.MachineconfigurationV1().MachineConfigs().Create(mcadd) + _, err = cs.MachineConfigs().Create(mcadd) if err != nil { t.Errorf("failed to create machine config %v", err) } @@ -194,7 +178,7 @@ func TestReconcileAfterBadMC(t *testing.T) { // grab the latest worker- MC var newMCName string if err := wait.PollImmediate(2*time.Second, 5*time.Minute, func() (bool, error) { - mcp, err := mcClient.MachineconfigurationV1().MachineConfigPools().Get("worker", metav1.GetOptions{}) + mcp, err := cs.MachineConfigPools().Get("worker", metav1.GetOptions{}) if err != nil { return false, err } @@ -211,7 +195,7 @@ func TestReconcileAfterBadMC(t *testing.T) { // verify that one node picked the above up if err := wait.Poll(2*time.Second, 5*time.Minute, func() (bool, error) { - nodes, err := getNodesByRole(k, "worker") + nodes, err := getNodesByRole(cs, "worker") if err != nil { return false, err } @@ -227,7 +211,7 @@ func TestReconcileAfterBadMC(t *testing.T) { // verify that we got indeed an unavailable machine in the pool if err := wait.Poll(2*time.Second, 5*time.Minute, func() (bool, error) { - mcp, err := mcClient.MachineconfigurationV1().MachineConfigPools().Get("worker", metav1.GetOptions{}) + mcp, err := cs.MachineConfigPools().Get("worker", metav1.GetOptions{}) if err != nil { return false, err } @@ -240,13 +224,13 @@ func TestReconcileAfterBadMC(t *testing.T) { } // now delete the bad MC and watch the nodes reconciling as expected - if err := mcClient.MachineconfigurationV1().MachineConfigs().Delete(mcadd.Name, &metav1.DeleteOptions{}); err != nil { + if err := cs.MachineConfigs().Delete(mcadd.Name, &metav1.DeleteOptions{}); err != nil { t.Error(err) } // wait for the mcp to go back to previous config if err := wait.PollImmediate(2*time.Second, 5*time.Minute, func() (bool, error) { - mcp, err := mcClient.MachineconfigurationV1().MachineConfigPools().Get("worker", metav1.GetOptions{}) + mcp, err := 
cs.MachineConfigPools().Get("worker", metav1.GetOptions{}) if err != nil { return false, err } @@ -260,11 +244,11 @@ func TestReconcileAfterBadMC(t *testing.T) { visited := make(map[string]bool) if err := wait.Poll(2*time.Second, 10*time.Minute, func() (bool, error) { - nodes, err := getNodesByRole(k, "worker") + nodes, err := getNodesByRole(cs, "worker") if err != nil { return false, err } - mcp, err = mcClient.MachineconfigurationV1().MachineConfigPools().Get("worker", metav1.GetOptions{}) + mcp, err = cs.MachineConfigPools().Get("worker", metav1.GetOptions{}) if err != nil { return false, err } diff --git a/test/e2e/mco_test.go b/test/e2e/mco_test.go index 7393217927..1b303bb77f 100644 --- a/test/e2e/mco_test.go +++ b/test/e2e/mco_test.go @@ -3,17 +3,14 @@ package e2e_test import ( "testing" - "github.com/openshift/machine-config-operator/cmd/common" + "github.com/openshift/machine-config-operator/test/e2e/framework" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) func TestClusterOperatorRelatedObjects(t *testing.T) { - cb, err := common.NewClientBuilder("") - if err != nil { - t.Errorf("%#v", err) - } - configClient := cb.ConfigClientOrDie("test-co-related-objects") - co, err := configClient.ConfigV1().ClusterOperators().Get("machine-config", metav1.GetOptions{}) + cs := framework.NewClientSet("") + + co, err := cs.ClusterOperators().Get("machine-config", metav1.GetOptions{}) if err != nil { t.Errorf("couldn't get clusteroperator %v", err) } diff --git a/test/e2e/osimageurl_test.go b/test/e2e/osimageurl_test.go index 01c481805e..d23757ff53 100644 --- a/test/e2e/osimageurl_test.go +++ b/test/e2e/osimageurl_test.go @@ -3,25 +3,20 @@ package e2e_test import ( "testing" + "github.com/openshift/machine-config-operator/test/e2e/framework" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/openshift/machine-config-operator/cmd/common" ) func TestOSImageURL(t *testing.T) { - cb, err := common.NewClientBuilder("") - if err != nil { - t.Fatalf("%#v", err) - } 
- mcClient := cb.MachineConfigClientOrDie("mc-file-add") + cs := framework.NewClientSet("") // grab the latest worker- MC - mcp, err := mcClient.MachineconfigurationV1().MachineConfigPools().Get("worker", metav1.GetOptions{}) + mcp, err := cs.MachineConfigPools().Get("worker", metav1.GetOptions{}) if err != nil { t.Fatalf("%#v", err) } - mc, err := mcClient.MachineconfigurationV1().MachineConfigs().Get(mcp.Status.Configuration.Name, metav1.GetOptions{}) + mc, err := cs.MachineConfigs().Get(mcp.Status.Configuration.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("%#v", err) } @@ -31,11 +26,11 @@ func TestOSImageURL(t *testing.T) { } // grab the latest master- MC - mcp, err = mcClient.MachineconfigurationV1().MachineConfigPools().Get("master", metav1.GetOptions{}) + mcp, err = cs.MachineConfigPools().Get("master", metav1.GetOptions{}) if err != nil { t.Fatalf("%#v", err) } - mc, err = mcClient.MachineconfigurationV1().MachineConfigs().Get(mcp.Status.Configuration.Name, metav1.GetOptions{}) + mc, err = cs.MachineConfigs().Get(mcp.Status.Configuration.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("%#v", err) } diff --git a/test/e2e/sanity_test.go b/test/e2e/sanity_test.go index 794e377ce8..f304edb33f 100644 --- a/test/e2e/sanity_test.go +++ b/test/e2e/sanity_test.go @@ -3,20 +3,15 @@ package e2e_test import ( "testing" + "github.com/openshift/machine-config-operator/test/e2e/framework" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/openshift/machine-config-operator/cmd/common" ) // Test case for https://github.com/openshift/machine-config-operator/pull/288/commits/44d5c5215b5450fca32806f796b50a3372daddc2 func TestOperatorLabel(t *testing.T) { - cb, err := common.NewClientBuilder("") - if err != nil{ - t.Errorf("%#v", err) - } - k := cb.KubeClientOrDie("sanity-test") + cs := framework.NewClientSet("") - d, err := k.AppsV1().DaemonSets("openshift-machine-config-operator").Get("machine-config-daemon", metav1.GetOptions{}) + d, err := 
cs.DaemonSets("openshift-machine-config-operator").Get("machine-config-daemon", metav1.GetOptions{}) if err != nil { t.Errorf("%#v", err) } From 1aebad010f4e3a5feb394dfe3355da3dda3de21b Mon Sep 17 00:00:00 2001 From: Antonio Murdaca Date: Sun, 17 Mar 2019 16:51:40 +0100 Subject: [PATCH 2/3] controller/render: fix race with template controller The template controller is responsible for generating the initial MCs but the render controller can kick in before the template is done. Add a sync mechanism by looking at the controller config status as that's the souce of truth to understand if a sync is done if the controller config changes and the template controller runs again. Signed-off-by: Antonio Murdaca --- cmd/machine-config-controller/start.go | 1 + pkg/controller/render/render_controller.go | 46 ++++++++++++++++++- .../render/render_controller_test.go | 37 ++++++++++++++- 3 files changed, 81 insertions(+), 3 deletions(-) diff --git a/cmd/machine-config-controller/start.go b/cmd/machine-config-controller/start.go index 11263b451e..479a660c30 100644 --- a/cmd/machine-config-controller/start.go +++ b/cmd/machine-config-controller/start.go @@ -121,6 +121,7 @@ func createControllers(ctx *controllercommon.ControllerContext) []controllercomm render.New( ctx.InformerFactory.Machineconfiguration().V1().MachineConfigPools(), ctx.InformerFactory.Machineconfiguration().V1().MachineConfigs(), + ctx.InformerFactory.Machineconfiguration().V1().ControllerConfigs(), ctx.ClientBuilder.KubeClientOrDie("render-controller"), ctx.ClientBuilder.MachineConfigClientOrDie("render-controller"), ), diff --git a/pkg/controller/render/render_controller.go b/pkg/controller/render/render_controller.go index 5710167645..49a3ec5c09 100644 --- a/pkg/controller/render/render_controller.go +++ b/pkg/controller/render/render_controller.go @@ -63,6 +63,9 @@ type Controller struct { mcpListerSynced cache.InformerSynced mcListerSynced cache.InformerSynced + ccLister mcfglistersv1.ControllerConfigLister + 
ccListerSynced cache.InformerSynced + queue workqueue.RateLimitingInterface } @@ -70,6 +73,7 @@ type Controller struct { func New( mcpInformer mcfginformersv1.MachineConfigPoolInformer, mcInformer mcfginformersv1.MachineConfigInformer, + ccInformer mcfginformersv1.ControllerConfigInformer, kubeClient clientset.Interface, mcfgClient mcfgclientset.Interface, ) *Controller { @@ -101,6 +105,8 @@ func New( ctrl.mcLister = mcInformer.Lister() ctrl.mcpListerSynced = mcpInformer.Informer().HasSynced ctrl.mcListerSynced = mcInformer.Informer().HasSynced + ctrl.ccLister = ccInformer.Lister() + ctrl.ccListerSynced = ccInformer.Informer().HasSynced return ctrl } @@ -113,7 +119,7 @@ func (ctrl *Controller) Run(workers int, stopCh <-chan struct{}) { glog.Info("Starting MachineConfigController-RenderController") defer glog.Info("Shutting down MachineConfigController-RenderController") - if !cache.WaitForCacheSync(stopCh, ctrl.mcpListerSynced, ctrl.mcListerSynced) { + if !cache.WaitForCacheSync(stopCh, ctrl.mcpListerSynced, ctrl.mcListerSynced, ctrl.ccListerSynced) { return } @@ -130,6 +136,7 @@ func (ctrl *Controller) addMachineConfigPool(obj interface{}) { ctrl.enqueueMachineConfigPool(pool) } + func (ctrl *Controller) updateMachineConfigPool(old, cur interface{}) { oldPool := old.(*mcfgv1.MachineConfigPool) curPool := cur.(*mcfgv1.MachineConfigPool) @@ -137,6 +144,7 @@ func (ctrl *Controller) updateMachineConfigPool(old, cur interface{}) { glog.V(4).Infof("Updating MachineConfigPool %s", oldPool.Name) ctrl.enqueueMachineConfigPool(curPool) } + func (ctrl *Controller) deleteMachineConfigPool(obj interface{}) { pool, ok := obj.(*mcfgv1.MachineConfigPool) if !ok { @@ -183,6 +191,7 @@ func (ctrl *Controller) addMachineConfig(obj interface{}) { ctrl.enqueueMachineConfigPool(p) } } + func (ctrl *Controller) updateMachineConfig(old, cur interface{}) { oldMC := old.(*mcfgv1.MachineConfig) curMC := cur.(*mcfgv1.MachineConfig) @@ -216,6 +225,7 @@ func (ctrl *Controller) 
updateMachineConfig(old, cur interface{}) { ctrl.enqueueMachineConfigPool(p) } } + func (ctrl *Controller) deleteMachineConfig(obj interface{}) { mc, ok := obj.(*mcfgv1.MachineConfig) @@ -414,6 +424,11 @@ func (ctrl *Controller) syncMachineConfigPool(key string) error { return err } + // TODO(runcom): add tests in render_controller_test.go for this condition + if err := ctrl.isControllerConfigCompleted(); err != nil { + return err + } + mcs, err := ctrl.mcLister.List(selector) if err != nil { return err @@ -425,6 +440,35 @@ func (ctrl *Controller) syncMachineConfigPool(key string) error { return ctrl.syncGeneratedMachineConfig(pool, mcs) } +func (ctrl *Controller) isControllerConfigCompleted() error { + cc, err := ctrl.ccLister.List(labels.Everything()) + if err != nil { + return fmt.Errorf("could not enumerate ControllerConfig %s", err) + } + if len(cc) == 0 { + return fmt.Errorf("controllerConfigList is empty") + } + + cur, err := ctrl.ccLister.Get(cc[0].GetName()) + if err != nil { + return err + } + + if cur.Generation != cur.Status.ObservedGeneration { + return fmt.Errorf("status for ControllerConfig %s is being reported for %d, expecting it for %d", cc[0].GetName(), cur.Status.ObservedGeneration, cur.Generation) + } + + completed := mcfgv1.IsControllerConfigStatusConditionTrue(cur.Status.Conditions, mcfgv1.TemplateContollerCompleted) + running := mcfgv1.IsControllerConfigStatusConditionTrue(cur.Status.Conditions, mcfgv1.TemplateContollerRunning) + failing := mcfgv1.IsControllerConfigStatusConditionTrue(cur.Status.Conditions, mcfgv1.TemplateContollerFailing) + if completed && + !running && + !failing { + return nil + } + return fmt.Errorf("ControllerConfig has not completed: completed(%v) running(%v) failing(%v)", completed, running, failing) +} + // This function will eventually contain a sane garbage collection policy for rendered MachineConfigs; // see https://github.com/openshift/machine-config-operator/issues/301 // It will probably involve making sure 
we're only GCing a config after all nodes don't have it diff --git a/pkg/controller/render/render_controller_test.go b/pkg/controller/render/render_controller_test.go index 3994a010d0..d393bc8794 100644 --- a/pkg/controller/render/render_controller_test.go +++ b/pkg/controller/render/render_controller_test.go @@ -1,6 +1,7 @@ package render import ( + "fmt" "reflect" "testing" "time" @@ -36,6 +37,7 @@ type fixture struct { mcpLister []*mcfgv1.MachineConfigPool mcLister []*mcfgv1.MachineConfig + ccLister []*mcfgv1.ControllerConfig actions []core.Action @@ -82,11 +84,12 @@ func (f *fixture) newController() *Controller { f.client = fake.NewSimpleClientset(f.objects...) i := informers.NewSharedInformerFactory(f.client, noResyncPeriodFunc()) - c := New(i.Machineconfiguration().V1().MachineConfigPools(), i.Machineconfiguration().V1().MachineConfigs(), + c := New(i.Machineconfiguration().V1().MachineConfigPools(), i.Machineconfiguration().V1().MachineConfigs(), i.Machineconfiguration().V1().ControllerConfigs(), k8sfake.NewSimpleClientset(), f.client) c.mcpListerSynced = alwaysReady c.mcListerSynced = alwaysReady + c.ccListerSynced = alwaysReady c.eventRecorder = &record.FakeRecorder{} stopCh := make(chan struct{}) @@ -94,10 +97,12 @@ func (f *fixture) newController() *Controller { i.Start(stopCh) i.WaitForCacheSync(stopCh) + for _, c := range f.ccLister { + i.Machineconfiguration().V1().ControllerConfigs().Informer().GetIndexer().Add(c) + } for _, c := range f.mcpLister { i.Machineconfiguration().V1().MachineConfigPools().Informer().GetIndexer().Add(c) } - for _, m := range f.mcLister { i.Machineconfiguration().V1().MachineConfigs().Informer().GetIndexer().Add(m) } @@ -192,6 +197,8 @@ func filterInformerActions(actions []core.Action) []core.Action { if len(action.GetNamespace()) == 0 && (action.Matches("list", "machineconfigpools") || action.Matches("watch", "machineconfigpools") || + action.Matches("list", "controllerconfigs") || + action.Matches("watch", 
"controllerconfigs") || action.Matches("list", "machineconfigs") || action.Matches("watch", "machineconfigs")) { continue @@ -222,6 +229,24 @@ func (f *fixture) expectUpdateMachineConfigPoolStatus(pool *mcfgv1.MachineConfig f.actions = append(f.actions, core.NewRootUpdateSubresourceAction(schema.GroupVersionResource{Resource: "machineconfigpools"}, "status", pool)) } +func newControllerConfig(name string) *mcfgv1.ControllerConfig { + return &mcfgv1.ControllerConfig{ + TypeMeta: metav1.TypeMeta{APIVersion: mcfgv1.SchemeGroupVersion.String()}, + ObjectMeta: metav1.ObjectMeta{Name: name, UID: types.UID(utilrand.String(5))}, + Spec: mcfgv1.ControllerConfigSpec{ + EtcdDiscoveryDomain: fmt.Sprintf("%s.tt.testing", name), + }, + Status: mcfgv1.ControllerConfigStatus{ + Conditions: []mcfgv1.ControllerConfigStatusCondition{ + { + Type: mcfgv1.TemplateContollerCompleted, + Status: corev1.ConditionTrue, + }, + }, + }, + } +} + func TestCreatesGeneratedMachineConfig(t *testing.T) { f := newFixture(t) mcp := newMachineConfigPool("test-cluster-master", metav1.AddLabelToSelector(&metav1.LabelSelector{}, "node-role", "master"), "") @@ -238,6 +263,8 @@ func TestCreatesGeneratedMachineConfig(t *testing.T) { newMachineConfig("00-test-cluster-master", map[string]string{"node-role": "master"}, "dummy://", []ignv2_2types.File{files[0]}), newMachineConfig("05-extra-master", map[string]string{"node-role": "master"}, "dummy://1", []ignv2_2types.File{files[1]}), } + cc := newControllerConfig("test-cluster-configcreate") + f.ccLister = append(f.ccLister, cc) f.mcpLister = append(f.mcpLister, mcp) f.objects = append(f.objects, mcp) @@ -299,6 +326,9 @@ func TestUpdatesGeneratedMachineConfig(t *testing.T) { gmc.Spec.OSImageURL = "why-did-you-change-it" mcp.Status.Configuration.Name = gmc.Name + cc := newControllerConfig("test-cluster-configcreate") + f.ccLister = append(f.ccLister, cc) + f.mcpLister = append(f.mcpLister, mcp) f.objects = append(f.objects, mcp) f.mcLister = append(f.mcLister, 
mcs...) @@ -341,6 +371,9 @@ func TestDoNothing(t *testing.T) { } mcp.Status.Configuration.Name = gmc.Name + cc := newControllerConfig("test-cluster-configcreate") + f.ccLister = append(f.ccLister, cc) + f.mcpLister = append(f.mcpLister, mcp) f.objects = append(f.objects, mcp) f.mcLister = append(f.mcLister, mcs...) From 1e381d258cd31c591a31b061a9795b526f79058b Mon Sep 17 00:00:00 2001 From: Antonio Murdaca Date: Sun, 17 Mar 2019 23:29:06 +0100 Subject: [PATCH 3/3] move common api method to the apis pkg Signed-off-by: Antonio Murdaca --- .../v1/helpers.go | 23 +++++++ .../v1/helpers_test.go | 61 ++++++++++++++++++- pkg/controller/render/render_controller.go | 20 +----- pkg/operator/status.go | 21 ------- pkg/operator/status_test.go | 58 ------------------ pkg/operator/sync.go | 2 +- 6 files changed, 85 insertions(+), 100 deletions(-) diff --git a/pkg/apis/machineconfiguration.openshift.io/v1/helpers.go b/pkg/apis/machineconfiguration.openshift.io/v1/helpers.go index e19bb703db..62cbd0bef1 100644 --- a/pkg/apis/machineconfiguration.openshift.io/v1/helpers.go +++ b/pkg/apis/machineconfiguration.openshift.io/v1/helpers.go @@ -1,6 +1,7 @@ package v1 import ( + "fmt" "sort" ignv2_2 "github.com/coreos/ignition/config/v2_2" @@ -203,3 +204,25 @@ func IsControllerConfigStatusConditionPresentAndEqual(conditions []ControllerCon } return false } + +// IsControllerConfigCompleted checks whether a ControllerConfig is completed by the Template Controller +func IsControllerConfigCompleted(cc *ControllerConfig, ccGetter func(string) (*ControllerConfig, error)) error { + cur, err := ccGetter(cc.GetName()) + if err != nil { + return err + } + + if cur.Generation != cur.Status.ObservedGeneration { + return fmt.Errorf("status for ControllerConfig %s is being reported for %d, expecting it for %d", cc.GetName(), cur.Status.ObservedGeneration, cur.Generation) + } + + completed := IsControllerConfigStatusConditionTrue(cur.Status.Conditions, TemplateContollerCompleted) + running := 
IsControllerConfigStatusConditionTrue(cur.Status.Conditions, TemplateContollerRunning) + failing := IsControllerConfigStatusConditionTrue(cur.Status.Conditions, TemplateContollerFailing) + if completed && + !running && + !failing { + return nil + } + return fmt.Errorf("ControllerConfig has not completed: completed(%v) running(%v) failing(%v)", completed, running, failing) +} diff --git a/pkg/apis/machineconfiguration.openshift.io/v1/helpers_test.go b/pkg/apis/machineconfiguration.openshift.io/v1/helpers_test.go index bfb00c1b24..be31c91c29 100644 --- a/pkg/apis/machineconfiguration.openshift.io/v1/helpers_test.go +++ b/pkg/apis/machineconfiguration.openshift.io/v1/helpers_test.go @@ -1,6 +1,7 @@ package v1 import ( + "errors" "fmt" "reflect" "testing" @@ -134,7 +135,7 @@ func TestMergeMachineConfigs(t *testing.T) { configs = append(configs, &MachineConfig{ ObjectMeta: metav1.ObjectMeta{Name: "3"}, Spec: MachineConfigSpec{ - OSImageURL:"example.com/os@sha256:notthetarget", + OSImageURL: "example.com/os@sha256:notthetarget", }, }) @@ -165,3 +166,61 @@ func TestMergeMachineConfigs(t *testing.T) { t.Errorf("OSImageURL expected: %s, received: %s", targetOSImageURL, merged.Spec.OSImageURL) } } + +func TestIsControllerConfigCompleted(t *testing.T) { + tests := []struct { + obsrvdGen int64 + completed bool + running bool + failing bool + + err error + }{{ + obsrvdGen: 0, + err: errors.New("status for ControllerConfig dummy is being reported for 0, expecting it for 1"), + }, { + obsrvdGen: 1, + running: true, + err: errors.New("ControllerConfig has not completed: completed(false) running(true) failing(false)"), + }, { + obsrvdGen: 1, + completed: true, + }, { + obsrvdGen: 1, + completed: true, + running: true, + err: errors.New("ControllerConfig has not completed: completed(true) running(true) failing(false)"), + }, { + obsrvdGen: 1, + failing: true, + err: errors.New("ControllerConfig has not completed: completed(false) running(false) failing(true)"), + }} + for idx, test 
:= range tests { + t.Run(fmt.Sprintf("#%d", idx), func(t *testing.T) { + getter := func(name string) (*ControllerConfig, error) { + var conds []ControllerConfigStatusCondition + if test.completed { + conds = append(conds, *NewControllerConfigStatusCondition(TemplateContollerCompleted, corev1.ConditionTrue, "", "")) + } + if test.running { + conds = append(conds, *NewControllerConfigStatusCondition(TemplateContollerRunning, corev1.ConditionTrue, "", "")) + } + if test.failing { + conds = append(conds, *NewControllerConfigStatusCondition(TemplateContollerFailing, corev1.ConditionTrue, "", "")) + } + return &ControllerConfig{ + ObjectMeta: metav1.ObjectMeta{Generation: 1, Name: name}, + Status: ControllerConfigStatus{ + ObservedGeneration: test.obsrvdGen, + Conditions: conds, + }, + }, nil + } + + err := IsControllerConfigCompleted(&ControllerConfig{ObjectMeta: metav1.ObjectMeta{Generation: 1, Name: "dummy"}}, getter) + if !reflect.DeepEqual(err, test.err) { + t.Fatalf("expected %v got %v", test.err, err) + } + }) + } +} diff --git a/pkg/controller/render/render_controller.go b/pkg/controller/render/render_controller.go index 49a3ec5c09..fdeedcdff1 100644 --- a/pkg/controller/render/render_controller.go +++ b/pkg/controller/render/render_controller.go @@ -448,25 +448,7 @@ func (ctrl *Controller) isControllerConfigCompleted() error { if len(cc) == 0 { return fmt.Errorf("controllerConfigList is empty") } - - cur, err := ctrl.ccLister.Get(cc[0].GetName()) - if err != nil { - return err - } - - if cur.Generation != cur.Status.ObservedGeneration { - return fmt.Errorf("status for ControllerConfig %s is being reported for %d, expecting it for %d", cc[0].GetName(), cur.Status.ObservedGeneration, cur.Generation) - } - - completed := mcfgv1.IsControllerConfigStatusConditionTrue(cur.Status.Conditions, mcfgv1.TemplateContollerCompleted) - running := mcfgv1.IsControllerConfigStatusConditionTrue(cur.Status.Conditions, mcfgv1.TemplateContollerRunning) - failing := 
mcfgv1.IsControllerConfigStatusConditionTrue(cur.Status.Conditions, mcfgv1.TemplateContollerFailing) - if completed && - !running && - !failing { - return nil - } - return fmt.Errorf("ControllerConfig has not completed: completed(%v) running(%v) failing(%v)", completed, running, failing) + return mcfgv1.IsControllerConfigCompleted(cc[0], ctrl.ccLister.Get) } // This function will eventually contain a sane garbage collection policy for rendered MachineConfigs; diff --git a/pkg/operator/status.go b/pkg/operator/status.go index bc6636291b..0b1696745a 100644 --- a/pkg/operator/status.go +++ b/pkg/operator/status.go @@ -217,27 +217,6 @@ func (optr *Operator) allMachineConfigPoolStatus() (map[string]string, error) { return ret, nil } -func isControllerConfigCompleted(cc *mcfgv1.ControllerConfig, ccGetter func(string) (*mcfgv1.ControllerConfig, error)) error { - cur, err := ccGetter(cc.GetName()) - if err != nil { - return err - } - - if cur.Generation != cur.Status.ObservedGeneration { - return fmt.Errorf("status for ControllerConfig %s is being reported for %d, expecting it for %d", cc.GetName(), cur.Status.ObservedGeneration, cur.Generation) - } - - completed := mcfgv1.IsControllerConfigStatusConditionTrue(cur.Status.Conditions, mcfgv1.TemplateContollerCompleted) - running := mcfgv1.IsControllerConfigStatusConditionTrue(cur.Status.Conditions, mcfgv1.TemplateContollerRunning) - failing := mcfgv1.IsControllerConfigStatusConditionTrue(cur.Status.Conditions, mcfgv1.TemplateContollerFailing) - if completed && - !running && - !failing { - return nil - } - return fmt.Errorf("ControllerConfig has not completed: as completed(%v) running(%v) failing(%v)", completed, running, failing) -} - // isMachineConfigPoolConfigurationValid returns nil error when the configuration of a `pool` is created by the controller at version `version`. 
func isMachineConfigPoolConfigurationValid(pool *mcfgv1.MachineConfigPool, version string, machineConfigGetter func(string) (*mcfgv1.MachineConfig, error)) error { // both .status.configuration.name and .status.configuration.source must be set. diff --git a/pkg/operator/status_test.go b/pkg/operator/status_test.go index f44db1300a..c83dbca524 100644 --- a/pkg/operator/status_test.go +++ b/pkg/operator/status_test.go @@ -126,64 +126,6 @@ func TestIsMachineConfigPoolConfigurationValid(t *testing.T) { } } -func TestIsControllerConfigCompleted(t *testing.T) { - tests := []struct { - obsrvdGen int64 - completed bool - running bool - failing bool - - err error - }{{ - obsrvdGen: 0, - err: errors.New("status for ControllerConfig dummy is being reported for 0, expecting it for 1"), - }, { - obsrvdGen: 1, - running: true, - err: errors.New("ControllerConfig has not completed: as completed(false) running(true) failing(false)"), - }, { - obsrvdGen: 1, - completed: true, - }, { - obsrvdGen: 1, - completed: true, - running: true, - err: errors.New("ControllerConfig has not completed: as completed(true) running(true) failing(false)"), - }, { - obsrvdGen: 1, - failing: true, - err: errors.New("ControllerConfig has not completed: as completed(false) running(false) failing(true)"), - }} - for idx, test := range tests { - t.Run(fmt.Sprintf("#%d", idx), func(t *testing.T) { - getter := func(name string) (*mcfgv1.ControllerConfig, error) { - var conds []mcfgv1.ControllerConfigStatusCondition - if test.completed { - conds = append(conds, *mcfgv1.NewControllerConfigStatusCondition(mcfgv1.TemplateContollerCompleted, corev1.ConditionTrue, "", "")) - } - if test.running { - conds = append(conds, *mcfgv1.NewControllerConfigStatusCondition(mcfgv1.TemplateContollerRunning, corev1.ConditionTrue, "", "")) - } - if test.failing { - conds = append(conds, *mcfgv1.NewControllerConfigStatusCondition(mcfgv1.TemplateContollerFailing, corev1.ConditionTrue, "", "")) - } - return 
&mcfgv1.ControllerConfig{ - ObjectMeta: metav1.ObjectMeta{Generation: 1, Name: name}, - Status: mcfgv1.ControllerConfigStatus{ - ObservedGeneration: test.obsrvdGen, - Conditions: conds, - }, - }, nil - } - - err := isControllerConfigCompleted(&mcfgv1.ControllerConfig{ObjectMeta: metav1.ObjectMeta{Generation: 1, Name: "dummy"}}, getter) - if !reflect.DeepEqual(err, test.err) { - t.Fatalf("expected %v got %v", test.err, err) - } - }) - } -} - type mockMCPLister struct{} func (mcpl *mockMCPLister) List(selector labels.Selector) (ret []*mcfgv1.MachineConfigPool, err error) { diff --git a/pkg/operator/sync.go b/pkg/operator/sync.go index 4adf6b0078..74f5e287dc 100644 --- a/pkg/operator/sync.go +++ b/pkg/operator/sync.go @@ -451,7 +451,7 @@ func (optr *Operator) waitForDaemonsetRollout(resource *appsv1.DaemonSet) error func (optr *Operator) waitForControllerConfigToBeCompleted(resource *mcfgv1.ControllerConfig) error { var lastErr error if err := wait.Poll(controllerConfigCompletedInterval, controllerConfigCompletedTimeout, func() (bool, error) { - if err := isControllerConfigCompleted(resource, optr.ccLister.Get); err != nil { + if err := mcfgv1.IsControllerConfigCompleted(resource, optr.ccLister.Get); err != nil { lastErr = fmt.Errorf("controllerconfig is not completed: %v", err) return false, nil }