diff --git a/pkg/apis/machineconfiguration.openshift.io/v1/types.go b/pkg/apis/machineconfiguration.openshift.io/v1/types.go
index 8cfa3ba29d..3c780e7d5b 100644
--- a/pkg/apis/machineconfiguration.openshift.io/v1/types.go
+++ b/pkg/apis/machineconfiguration.openshift.io/v1/types.go
@@ -358,6 +358,14 @@ const (
	// MachineConfigPoolDegraded is the overall status of the pool based, today, on whether we fail with NodeDegraded or RenderDegraded
	MachineConfigPoolDegraded MachineConfigPoolConditionType = "Degraded"

	// MachineConfigPoolBuildPending means an OS-image build has been requested
	// for this pool but has not started running yet.
	MachineConfigPoolBuildPending MachineConfigPoolConditionType = "BuildPending"

	// MachineConfigPoolBuilding means the pool's build pod is currently running.
	MachineConfigPoolBuilding MachineConfigPoolConditionType = "Building"

	// MachineConfigPoolBuildSuccess means the most recent build for this pool
	// completed successfully.
	MachineConfigPoolBuildSuccess MachineConfigPoolConditionType = "BuildSuccess"

	// MachineConfigPoolBuildFailed means the most recent build for this pool
	// failed.
	MachineConfigPoolBuildFailed MachineConfigPoolConditionType = "BuildFailed"
)

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
diff --git a/pkg/controller/build/build_controller.go b/pkg/controller/build/build_controller.go
new file mode 100644
index 0000000000..d6550a8aa0
--- /dev/null
+++ b/pkg/controller/build/build_controller.go
@@ -0,0 +1,830 @@
package build

import (
	"context"
	"fmt"
	"strings"
	"time"

	"github.com/golang/glog"
	mcfgv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1"
	"github.com/openshift/machine-config-operator/pkg/generated/clientset/versioned/scheme"
	corev1 "k8s.io/api/core/v1"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
	coreclientsetv1 "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/record"
	"k8s.io/client-go/util/workqueue"

	imagelistersv1 "github.com/openshift/client-go/image/listers/image/v1"

	buildlistersv1 "github.com/openshift/client-go/build/listers/build/v1"

	buildclientset "github.com/openshift/client-go/build/clientset/versioned"
	imageclientset "github.com/openshift/client-go/image/clientset/versioned"

	mcfgclientset "github.com/openshift/machine-config-operator/pkg/generated/clientset/versioned"
	mcfginformersv1 "github.com/openshift/machine-config-operator/pkg/generated/informers/externalversions/machineconfiguration.openshift.io/v1"
	mcfglistersv1 "github.com/openshift/machine-config-operator/pkg/generated/listers/machineconfiguration.openshift.io/v1"

	coreinformersv1 "k8s.io/client-go/informers/core/v1"

	ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	corelistersv1 "k8s.io/client-go/listers/core/v1"

	k8serrors "k8s.io/apimachinery/pkg/api/errors"
)

const (
	// targetMachineConfigPoolLabel is set on build pods to record which
	// MachineConfigPool the build belongs to (read back in updatePod).
	targetMachineConfigPoolLabel = "machineconfiguration.openshift.io/targetMachineConfigPool"
	// TODO(zzlotnik): Is there a constant for this someplace else?
	desiredConfigLabel = "machineconfiguration.openshift.io/desiredConfig"
)

var (
	// controllerKind contains the schema.GroupVersionKind for this controller type.
	//nolint:varcheck,deadcode // This will be used eventually
	controllerKind = mcfgv1.SchemeGroupVersion.WithKind("MachineConfigPool")
)

// BuildControllerConfig holds the tunables for the build controller.
//
//nolint:revive // If I name this ControllerConfig, that name will be overloaded :P
type BuildControllerConfig struct {
	// updateDelay is a pause to deal with churn in MachineConfigs; see
	// https://github.com/openshift/machine-config-operator/issues/301
	// Default: 5 seconds
	UpdateDelay time.Duration

	// maxRetries is the number of times a machineconfig pool will be retried before it is dropped out of the queue.
	// With the current rate-limiter in use (5ms*2^(maxRetries-1)) the following numbers represent the times
	// a machineconfig pool is going to be requeued:
	//
	// 5ms, 10ms, 20ms, 40ms, 80ms, 160ms, 320ms, 640ms, 1.3s, 2.6s, 5.1s, 10.2s, 20.4s, 41s, 82s
	// Default: 5
	MaxRetries int
}

// Controller defines the build controller.
type Controller struct {
	// Clients for the objects this controller reads and writes. Note that
	// only client and kubeclient are populated by New(); the image and build
	// clients are currently unset there.
	client      mcfgclientset.Interface
	imageclient imageclientset.Interface
	buildclient buildclientset.Interface
	kubeclient  clientset.Interface

	eventRecorder record.EventRecorder

	// syncHandler and enqueueMachineConfigPool are indirected through fields
	// so alternate implementations can be substituted (e.g. in tests).
	syncHandler              func(mcp string) error
	enqueueMachineConfigPool func(*mcfgv1.MachineConfigPool)

	ccLister  mcfglistersv1.ControllerConfigLister
	mcpLister mcfglistersv1.MachineConfigPoolLister
	bLister   buildlistersv1.BuildLister
	bcLister  buildlistersv1.BuildConfigLister
	isLister  imagelistersv1.ImageStreamLister
	podLister corelistersv1.PodLister

	ccListerSynced  cache.InformerSynced
	mcpListerSynced cache.InformerSynced
	podListerSynced cache.InformerSynced

	// queue holds MachineConfigPool keys awaiting a sync.
	queue workqueue.RateLimitingInterface

	config BuildControllerConfig
}

// DefaultBuildControllerConfig returns the default controller configuration:
// 5 retries and a 5-second update delay.
func DefaultBuildControllerConfig() BuildControllerConfig {
	return BuildControllerConfig{
		MaxRetries:  5,
		UpdateDelay: time.Second * 5,
	}
}

// New returns a new build controller wired to the given informers and
// clients. Pod and MachineConfigPool event handlers are registered here;
// the build/image listers are not set up by this constructor.
func New(
	ctrlConfig BuildControllerConfig,
	podInformer coreinformersv1.PodInformer,
	ccInformer mcfginformersv1.ControllerConfigInformer,
	mcpInformer mcfginformersv1.MachineConfigPoolInformer,
	mcfgClient mcfgclientset.Interface,
	kubeClient clientset.Interface,
) *Controller {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(&coreclientsetv1.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})

	ctrl := &Controller{
		client:        mcfgClient,
		kubeclient:    kubeClient,
		eventRecorder: eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: "machineosbuilder-buildcontroller"}),
		queue:         workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "machineosbuilder-buildcontroller"),
		config:        ctrlConfig,
	}

	// As an aside, why doesn't the constructor here set up all the informers?
	podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc:    ctrl.addPod,
		UpdateFunc: ctrl.updatePod,
		DeleteFunc: ctrl.deletePod,
	})

	mcpInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc:    ctrl.addMachineConfigPool,
		UpdateFunc: ctrl.updateMachineConfigPool,
		DeleteFunc: ctrl.deleteMachineConfigPool,
	})

	ctrl.syncHandler = ctrl.syncMachineConfigPool
	ctrl.enqueueMachineConfigPool = ctrl.enqueueDefault

	ctrl.ccLister = ccInformer.Lister()
	ctrl.mcpLister = mcpInformer.Lister()
	ctrl.podLister = podInformer.Lister()

	ctrl.ccListerSynced = ccInformer.Informer().HasSynced
	ctrl.mcpListerSynced = mcpInformer.Informer().HasSynced
	ctrl.podListerSynced = podInformer.Informer().HasSynced

	return ctrl
}

// addPod is the pod informer's add handler; it only logs.
func (ctrl *Controller) addPod(obj interface{}) {
	pod := obj.(*corev1.Pod)
	glog.V(4).Infof("Adding Pod %s. Is build pod? %v", pod.Name, isBuildPod(pod))
}

// updatePod is the pod informer's update handler. For build pods, it mirrors
// the pod's phase onto the targeted MachineConfigPool's Build* conditions and
// then requeues the pool.
func (ctrl *Controller) updatePod(oldObj, curObj interface{}) {
	curPod := curObj.(*corev1.Pod).DeepCopy()

	// Ignore non-build pods.
	// TODO: Figure out if we can add the filter criteria onto the lister.
	if !isBuildPod(curPod) {
		return
	}

	// The build pod's label tells us which pool it is building for. Fetch a
	// fresh copy from the API server (not the lister cache).
	pool, err := ctrl.client.MachineconfigurationV1().MachineConfigPools().Get(context.TODO(), curPod.Labels[targetMachineConfigPoolLabel], metav1.GetOptions{})
	if err != nil {
		ctrl.handleErr(err, curPod.Name)
		return
	}

	// Each mark* call below persists the pool status, so only transition the
	// condition if it is not already true.
	switch curPod.Status.Phase {
	case corev1.PodPending:
		glog.Infof("Build pod (%s) is pending", curPod.Name)
		if !mcfgv1.IsMachineConfigPoolConditionTrue(pool.Status.Conditions, mcfgv1.MachineConfigPoolBuildPending) {
			err = ctrl.markBuildPending(pool)
		}
	case corev1.PodRunning:
		// If we're running, then there's nothing to do right now.
		glog.Infof("Build pod (%s) is running", curPod.Name)
		if !mcfgv1.IsMachineConfigPoolConditionTrue(pool.Status.Conditions, mcfgv1.MachineConfigPoolBuilding) {
			err = ctrl.markBuildInProgress(pool)
		}
	case corev1.PodSucceeded:
		// If we've succeeded, we need to update the pool to indicate that.
		glog.Infof("Build pod (%s) has succeeded", curPod.Name)
		if !mcfgv1.IsMachineConfigPoolConditionTrue(pool.Status.Conditions, mcfgv1.MachineConfigPoolBuildSuccess) {
			err = ctrl.markBuildSucceeded(pool)
		}
	case corev1.PodFailed:
		// If we've failed, we need to update the pool to indicate that.
		glog.Infof("Build pod (%s) failed", curPod.Name)
		if !mcfgv1.IsMachineConfigPoolConditionTrue(pool.Status.Conditions, mcfgv1.MachineConfigPoolBuildFailed) {
			err = ctrl.markBuildFailed(pool)
		}
	}

	if err != nil {
		ctrl.handleErr(err, pool.Name)
		return
	}

	ctrl.enqueueMachineConfigPool(pool)
}

// deletePod is the pod informer's delete handler; it only logs.
func (ctrl *Controller) deletePod(obj interface{}) {
	pod := obj.(*corev1.Pod)
	glog.V(4).Infof("Deleting Pod %s. Is build pod? %v", pod.Name, isBuildPod(pod))
}

// Run executes the build controller, starting the given number of workers
// and blocking until stopCh is closed.
+func (ctrl *Controller) Run(workers int, stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + defer ctrl.queue.ShutDown() + + if !cache.WaitForCacheSync(stopCh, ctrl.mcpListerSynced, ctrl.ccListerSynced, ctrl.podListerSynced) { + return + } + + glog.Info("Starting MachineOSBuilder-BuildController") + defer glog.Info("Shutting down MachineOSBuilder-BuildController") + + for i := 0; i < workers; i++ { + go wait.Until(ctrl.worker, time.Second, stopCh) + } + + <-stopCh +} + +func (ctrl *Controller) enqueue(pool *mcfgv1.MachineConfigPool) { + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(pool) + if err != nil { + utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", pool, err)) + return + } + + ctrl.queue.Add(key) +} + +func (ctrl *Controller) enqueueRateLimited(pool *mcfgv1.MachineConfigPool) { + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(pool) + if err != nil { + utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", pool, err)) + return + } + + ctrl.queue.AddRateLimited(key) +} + +// enqueueAfter will enqueue a pool after the provided amount of time. +func (ctrl *Controller) enqueueAfter(pool *mcfgv1.MachineConfigPool, after time.Duration) { + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(pool) + if err != nil { + utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", pool, err)) + return + } + + ctrl.queue.AddAfter(key, after) +} + +// enqueueDefault calls a default enqueue function +func (ctrl *Controller) enqueueDefault(pool *mcfgv1.MachineConfigPool) { + ctrl.enqueueAfter(pool, ctrl.config.UpdateDelay) +} + +// worker runs a worker thread that just dequeues items, processes them, and marks them done. +// It enforces that the syncHandler is never invoked concurrently with the same key. 
func (ctrl *Controller) worker() {
	for ctrl.processNextWorkItem() {
	}
}

// processNextWorkItem pops one key off the queue, runs the sync handler on
// it, and reports the outcome to the rate limiter. It returns false only
// when the queue is shutting down.
func (ctrl *Controller) processNextWorkItem() bool {
	key, quit := ctrl.queue.Get()
	if quit {
		return false
	}
	defer ctrl.queue.Done(key)

	err := ctrl.syncHandler(key.(string))
	ctrl.handleErr(err, key)

	return true
}

// handleErr implements the retry policy: up to MaxRetries rate-limited
// requeues; beyond that the retry counter is reset (Forget) and the key is
// retried once more a full minute later.
func (ctrl *Controller) handleErr(err error, key interface{}) {
	if err == nil {
		ctrl.queue.Forget(key)
		return
	}

	if ctrl.queue.NumRequeues(key) < ctrl.config.MaxRetries {
		glog.V(2).Infof("Error syncing machineconfigpool %v: %v", key, err)
		ctrl.queue.AddRateLimited(key)
		return
	}

	utilruntime.HandleError(err)
	glog.V(2).Infof("Dropping machineconfigpool %q out of the queue: %v", key, err)
	ctrl.queue.Forget(key)
	ctrl.queue.AddAfter(key, 1*time.Minute)
}

// syncMachineConfigPool will sync the machineconfig pool with the given key.
// This function is not meant to be invoked concurrently with the same key.
func (ctrl *Controller) syncMachineConfigPool(key string) error {
	startTime := time.Now()
	glog.V(4).Infof("Started syncing machineconfigpool %q (%v)", key, startTime)
	defer func() {
		glog.V(4).Infof("Finished syncing machineconfigpool %q (%v)", key, time.Since(startTime))
	}()

	_, name, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		return err
	}
	machineconfigpool, err := ctrl.mcpLister.Get(name)
	if k8serrors.IsNotFound(err) {
		glog.V(2).Infof("MachineConfigPool %v has been deleted", key)
		return nil
	}
	if err != nil {
		return err
	}

	// TODO: Doing a deep copy of this pool object from our cache and using it to
	// determine our next course of action sometimes causes a race condition. I'm
	// not sure if it's better to get a current copy from the API server or what.
	// pool := machineconfigpool.DeepCopy()
	pool, err := ctrl.client.MachineconfigurationV1().MachineConfigPools().Get(context.TODO(), machineconfigpool.Name, metav1.GetOptions{})
	if err != nil {
		return err
	}

	// Not a layered pool, so stop here.
	if !ctrlcommon.IsLayeredPool(pool) {
		glog.V(4).Infof("MachineConfigPool %s is not opted-in for layering, ignoring", pool.Name)
		return nil
	}

	// If we need to do a build, we let updateMachineConfigPool() handle that
	// determination. It registers its intent to build by setting
	// mcfgv1.MachineConfigPoolBuildPending on the MachineConfigPool.
	//
	// We look for mcfgv1.MachineConfigPoolBuildPending and if found, we
	// start the build and set the condition to
	// mcfgv1.MachineConfigPoolBuilding.
	//
	// We use the PodInformer to determine whether the build is complete. The
	// PodInformer will set either MachineConfigPoolBuildSuccess or
	// MachineConfigPoolBuildFailed.

	switch {
	case mcfgv1.IsMachineConfigPoolConditionTrue(pool.Status.Conditions, mcfgv1.MachineConfigPoolDegraded):
		glog.V(4).Infof("MachineConfigPool %s is degraded, requeueing", pool.Name)
		ctrl.enqueueMachineConfigPool(pool)
		return nil
	case mcfgv1.IsMachineConfigPoolConditionTrue(pool.Status.Conditions, mcfgv1.MachineConfigPoolRenderDegraded):
		glog.V(4).Infof("MachineConfigPool %s is render degraded, requeueing", pool.Name)
		ctrl.enqueueMachineConfigPool(pool)
		return nil
	case mcfgv1.IsMachineConfigPoolConditionTrue(pool.Status.Conditions, mcfgv1.MachineConfigPoolBuildPending):
		glog.V(4).Infof("MachineConfigPool %s needs a build, starting", pool.Name)
		return ctrl.startBuildForMachineConfigPool(pool)
	case mcfgv1.IsMachineConfigPoolConditionTrue(pool.Status.Conditions, mcfgv1.MachineConfigPoolBuilding):
		glog.V(4).Infof("MachineConfigPool %s is building", pool.Name)
		return nil
	case mcfgv1.IsMachineConfigPoolConditionTrue(pool.Status.Conditions, mcfgv1.MachineConfigPoolBuildSuccess):
		glog.V(4).Infof("MachineConfigPool %s has successfully built", pool.Name)
		return nil
	default:
		glog.V(4).Infof("Nothing to do for pool %q", pool.Name)
	}

	// For everything else
	return ctrl.syncAvailableStatus(pool)
}

// Marks a given MachineConfigPool as a failed build. A failed build also
// degrades the pool, and the original failure is surfaced via
// syncFailingStatus so the key gets requeued.
func (ctrl *Controller) markBuildFailed(pool *mcfgv1.MachineConfigPool) error {
	glog.Errorf("Build failed for pool %s", pool.Name)

	setMCPBuildConditions(pool, []mcfgv1.MachineConfigPoolCondition{
		{
			Type:   mcfgv1.MachineConfigPoolBuildFailed,
			Reason: "BuildFailed",
			Status: corev1.ConditionTrue,
		},
		{
			Type:   mcfgv1.MachineConfigPoolBuildSuccess,
			Status: corev1.ConditionFalse,
		},
		{
			Type:   mcfgv1.MachineConfigPoolBuilding,
			Status: corev1.ConditionFalse,
		},
		{
			Type:   mcfgv1.MachineConfigPoolBuildPending,
			Status: corev1.ConditionFalse,
		},
		{
			Type:   mcfgv1.MachineConfigPoolDegraded,
			Status: corev1.ConditionTrue,
		},
	})

	return ctrl.syncFailingStatus(pool, fmt.Errorf("build failed"))
}

// Marks a given MachineConfigPool as the build is in progress.
func (ctrl *Controller) markBuildInProgress(pool *mcfgv1.MachineConfigPool) error {
	glog.Infof("Build in progress for MachineConfigPool %s, config %s", pool.Name, pool.Spec.Configuration.Name)

	setMCPBuildConditions(pool, []mcfgv1.MachineConfigPoolCondition{
		{
			Type:   mcfgv1.MachineConfigPoolBuildFailed,
			Status: corev1.ConditionFalse,
		},
		{
			Type:   mcfgv1.MachineConfigPoolBuildSuccess,
			Status: corev1.ConditionFalse,
		},
		{
			Type:   mcfgv1.MachineConfigPoolBuilding,
			Reason: "BuildRunning",
			Status: corev1.ConditionTrue,
		},
		{
			Type:   mcfgv1.MachineConfigPoolBuildPending,
			Status: corev1.ConditionFalse,
		},
	})

	return ctrl.syncAvailableStatus(pool)
}

// Marks a given MachineConfigPool as build successful and cleans up after itself.
+func (ctrl *Controller) markBuildSucceeded(pool *mcfgv1.MachineConfigPool) error { + glog.Infof("Build succeeded for MachineConfigPool %s, config %s", pool.Name, pool.Spec.Configuration.Name) + + if err := ctrl.kubeclient.CoreV1().Pods(ctrlcommon.MCONamespace).Delete(context.TODO(), getBuildPodName(pool), metav1.DeleteOptions{}); err != nil { + return fmt.Errorf("unable to delete build pod: %w", err) + } + + // Set the annotation or field to point to the newly-built container image. + // TODO: Figure out how to get that from the build interface. + deleteBuildPodRefFromMachineConfigPool(pool) + pool.Labels[ctrlcommon.ExperimentalNewestLayeredImageEquivalentConfigAnnotationKey] = "new-image-pullspec" + + setMCPBuildConditions(pool, []mcfgv1.MachineConfigPoolCondition{ + { + Type: mcfgv1.MachineConfigPoolBuildFailed, + Status: corev1.ConditionFalse, + }, + { + Type: mcfgv1.MachineConfigPoolBuildSuccess, + Reason: "BuildSucceeded", + Status: corev1.ConditionTrue, + }, + { + Type: mcfgv1.MachineConfigPoolBuilding, + Status: corev1.ConditionFalse, + }, + { + Type: mcfgv1.MachineConfigPoolDegraded, + Status: corev1.ConditionFalse, + }, + }) + + // We need to do an API server round-trip to ensure all of our mutations get + // propagated. + updatedPool, err := ctrl.client.MachineconfigurationV1().MachineConfigPools().Update(context.TODO(), pool, metav1.UpdateOptions{}) + if err != nil { + return fmt.Errorf("could not update MachineConfigPool %q: %w", pool.Name, err) + } + + return ctrl.syncAvailableStatus(updatedPool) +} + +// Marks a given MachineConfigPool as build pending. 
func (ctrl *Controller) markBuildPending(pool *mcfgv1.MachineConfigPool) error {
	glog.Infof("Build for %s marked pending", pool.Name)

	// BuildPending becomes the single true build condition; the others are
	// explicitly cleared.
	setMCPBuildConditions(pool, []mcfgv1.MachineConfigPoolCondition{
		{
			Type:   mcfgv1.MachineConfigPoolBuildFailed,
			Status: corev1.ConditionFalse,
		},
		{
			Type:   mcfgv1.MachineConfigPoolBuildSuccess,
			Status: corev1.ConditionFalse,
		},
		{
			Type:   mcfgv1.MachineConfigPoolBuilding,
			Status: corev1.ConditionFalse,
		},
		{
			Type:   mcfgv1.MachineConfigPoolBuildPending,
			Reason: "BuildPending",
			Status: corev1.ConditionTrue,
		},
	})

	return ctrl.syncAvailableStatus(pool)
}

// Machine Config Pools

// addMachineConfigPool is the pool informer's add handler; it enqueues the
// pool for a sync.
func (ctrl *Controller) addMachineConfigPool(obj interface{}) {
	pool := obj.(*mcfgv1.MachineConfigPool).DeepCopy()
	glog.V(4).Infof("Adding MachineConfigPool %s", pool.Name)
	ctrl.enqueueMachineConfigPool(pool)
}

// isBuildRunningForPool reports whether a build pod already exists for the
// pool's current rendered config. Errors other than NotFound are returned.
func (ctrl *Controller) isBuildRunningForPool(pool *mcfgv1.MachineConfigPool) (bool, error) {
	// First check if we have a build in progress for this MachineConfigPool and rendered config.
	_, err := ctrl.kubeclient.CoreV1().Pods(ctrlcommon.MCONamespace).Get(context.TODO(), getBuildPodName(pool), metav1.GetOptions{})
	if err != nil && !k8serrors.IsNotFound(err) {
		return false, err
	}

	// err == nil means the pod was found.
	return err == nil, nil
}

// Determines if we should run a build, then starts a build pod to perform the
// build, and updates the MachineConfigPool with an object reference for the
// build pod.
func (ctrl *Controller) startBuildForMachineConfigPool(pool *mcfgv1.MachineConfigPool) error {
	targetMC := pool.Spec.Configuration.Name

	// Only rendered configs are buildable.
	// TODO: Find a constant for this:
	if !strings.HasPrefix(targetMC, "rendered-") {
		return fmt.Errorf("%s is not a rendered MachineConfig", targetMC)
	}

	isBuildRunning, err := ctrl.isBuildRunningForPool(pool)
	if err != nil {
		return fmt.Errorf("could not determine if a preexisting build is running for %s: %w", pool.Name, err)
	}

	// A build pod already exists for this config; nothing to start.
	if isBuildRunning {
		return nil
	}

	glog.Infof("Starting build for pool %s", pool.Name)
	glog.Infof("Build pod name: %s", getBuildPodName(pool))

	// TODO: Figure out how to use the Builder interface for starting the build instead of this.
	pod, err := ctrl.kubeclient.CoreV1().Pods(ctrlcommon.MCONamespace).Create(context.TODO(), newBuildPod(pool), metav1.CreateOptions{})
	if err != nil {
		return fmt.Errorf("could not create build pod: %w", err)
	}

	// Record the build pod on the pool (both spec and status configuration
	// sources) so later syncs can associate the build with this config.
	if !machineConfigPoolHasBuildPodRef(pool) {
		ref := corev1.ObjectReference{
			Kind:      "Pod",
			Name:      pod.Name,
			Namespace: pod.Namespace,
			UID:       pod.UID,
		}

		pool.Spec.Configuration.Source = append(pool.Spec.Configuration.Source, ref)

		pool.Status.Configuration.Source = append(pool.Status.Configuration.Source, ref)
	}

	return ctrl.syncAvailableStatus(pool)
}

// If one wants to opt out, this removes all of the statuses and object
// references from a given MachineConfigPool.
func (ctrl *Controller) finalizeOptOut(pool *mcfgv1.MachineConfigPool) error {
	deleteBuildPodRefFromMachineConfigPool(pool)

	conditions := []mcfgv1.MachineConfigPoolCondition{}

	// Keep only the conditions that are not build-related.
	for _, condition := range pool.Status.Conditions {
		buildConditionFound := false
		for _, buildConditionType := range getMachineConfigPoolBuildConditions() {
			if condition.Type == buildConditionType {
				buildConditionFound = true
				break
			}
		}

		if !buildConditionFound {
			conditions = append(conditions, condition)
		}
	}

	pool.Status.Conditions = conditions
	return ctrl.syncAvailableStatus(pool)
}

// updateMachineConfigPool is the pool informer's update handler. It decides
// whether the pool just opted out of layering (finalize) or now requires a
// build (mark pending), then requeues the pool.
func (ctrl *Controller) updateMachineConfigPool(old, cur interface{}) {
	oldPool := old.(*mcfgv1.MachineConfigPool).DeepCopy()
	curPool := cur.(*mcfgv1.MachineConfigPool).DeepCopy()

	glog.V(4).Infof("Updating MachineConfigPool %s", oldPool.Name)

	doABuild, err := shouldWeDoABuild(ctrl.kubeclient, oldPool, curPool)
	if err != nil {
		glog.Errorln(err)
		ctrl.handleErr(err, curPool.Name)
		return
	}

	switch {
	case ctrlcommon.IsLayeredPool(oldPool) && !ctrlcommon.IsLayeredPool(curPool):
		glog.V(4).Infof("MachineConfigPool %s has opted out of layering", curPool.Name)
		if err := ctrl.finalizeOptOut(curPool); err != nil {
			glog.Errorln(err)
			ctrl.handleErr(err, curPool.Name)
			return
		}
	case doABuild:
		glog.V(4).Infof("MachineConfigPool %s has changed, requiring a build", curPool.Name)
		if err := ctrl.markBuildPending(curPool); err != nil {
			glog.Errorln(err)
			ctrl.handleErr(err, curPool.Name)
			return
		}
	default:
		glog.V(4).Infof("MachineConfigPool %s up-to-date", curPool.Name)
	}

	ctrl.enqueueMachineConfigPool(curPool)
}

// deleteMachineConfigPool is the pool informer's delete handler; it only
// logs, unwrapping informer tombstones when necessary.
func (ctrl *Controller) deleteMachineConfigPool(obj interface{}) {
	pool, ok := obj.(*mcfgv1.MachineConfigPool)
	if !ok {
		tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
		if !ok {
			utilruntime.HandleError(fmt.Errorf("Couldn't get object from tombstone %#v", obj))
			return
		}
		pool, ok = tombstone.Obj.(*mcfgv1.MachineConfigPool)
		if !ok {
			utilruntime.HandleError(fmt.Errorf("Tombstone contained object that is not a MachineConfigPool %#v", obj))
			return
		}
	}
	glog.V(4).Infof("Deleting MachineConfigPool %s", pool.Name)
}

// syncAvailableStatus clears RenderDegraded on the pool and persists its
// status subresource.
func (ctrl *Controller) syncAvailableStatus(pool *mcfgv1.MachineConfigPool) error {
	// I'm not sure what the consequences are of not doing this.
	//nolint:gocritic // Leaving this here for review purposes.
	/*
		if mcfgv1.IsMachineConfigPoolConditionFalse(pool.Status.Conditions, mcfgv1.MachineConfigPoolRenderDegraded) {
			return nil
		}
	*/
	sdegraded := mcfgv1.NewMachineConfigPoolCondition(mcfgv1.MachineConfigPoolRenderDegraded, corev1.ConditionFalse, "", "")
	mcfgv1.SetMachineConfigPoolCondition(&pool.Status, *sdegraded)

	if _, err := ctrl.client.MachineconfigurationV1().MachineConfigPools().UpdateStatus(context.TODO(), pool, metav1.UpdateOptions{}); err != nil {
		return err
	}

	return nil
}

// syncFailingStatus sets RenderDegraded with the build failure message,
// persists the status, and returns the original error so the key requeues.
func (ctrl *Controller) syncFailingStatus(pool *mcfgv1.MachineConfigPool, err error) error {
	sdegraded := mcfgv1.NewMachineConfigPoolCondition(mcfgv1.MachineConfigPoolRenderDegraded, corev1.ConditionTrue, "", fmt.Sprintf("Failed to build configuration for pool %s: %v", pool.Name, err))
	mcfgv1.SetMachineConfigPoolCondition(&pool.Status, *sdegraded)
	if _, updateErr := ctrl.client.MachineconfigurationV1().MachineConfigPools().UpdateStatus(context.TODO(), pool, metav1.UpdateOptions{}); updateErr != nil {
		glog.Errorf("Error updating MachineConfigPool %s: %v", pool.Name, updateErr)
	}
	return err
}

// Determines if a MachineConfigPool has a build pod reference.
+func machineConfigPoolHasBuildPodRef(pool *mcfgv1.MachineConfigPool) bool { + buildPodName := getBuildPodName(pool) + + searchFunc := func(cfg mcfgv1.MachineConfigPoolStatusConfiguration) bool { + for _, src := range cfg.Source { + if src.Name == buildPodName && src.Kind == "Pod" { + return true + } + } + + return false + } + + return searchFunc(pool.Spec.Configuration) && searchFunc(pool.Status.Configuration) +} + +// Computes the build pod name based upon the MachineConfigPool name and the +// current rendered config. +func getBuildPodName(pool *mcfgv1.MachineConfigPool) string { + return fmt.Sprintf("build-%s-%s", pool.Name, pool.Spec.Configuration.Name) +} + +// Deletes the build pod references from the MachineConfigPool. +func deleteBuildPodRefFromMachineConfigPool(pool *mcfgv1.MachineConfigPool) { + buildPodName := getBuildPodName(pool) + + deleteFunc := func(cfg mcfgv1.MachineConfigPoolStatusConfiguration) []corev1.ObjectReference { + configSources := []corev1.ObjectReference{} + + for _, src := range cfg.Source { + if src.Name != buildPodName { + configSources = append(configSources, src) + } + } + + return configSources + } + + pool.Spec.Configuration.Source = deleteFunc(pool.Spec.Configuration) + pool.Status.Configuration.Source = deleteFunc(pool.Status.Configuration) +} + +// Determines if two conditions are equal. Note: I purposely do not include the +// timestamp in the equality test, since we do not directly set it. +func isConditionEqual(cond1, cond2 mcfgv1.MachineConfigPoolCondition) bool { + return cond1.Type == cond2.Type && + cond1.Status == cond2.Status && + cond1.Message == cond2.Message && + cond1.Reason == cond2.Reason +} + +// Sets MCP build conditions on a given MachineConfigPool. 
func setMCPBuildConditions(pool *mcfgv1.MachineConfigPool, conditions []mcfgv1.MachineConfigPoolCondition) {
	for _, condition := range conditions {
		condition := condition
		// Skip conditions that already match (ignoring timestamps) to avoid
		// needless transition-time churn.
		currentCondition := mcfgv1.GetMachineConfigPoolCondition(pool.Status, condition.Type)
		if currentCondition != nil && isConditionEqual(*currentCondition, condition) {
			continue
		}

		mcpCondition := mcfgv1.NewMachineConfigPoolCondition(condition.Type, condition.Status, condition.Reason, condition.Message)
		mcfgv1.SetMachineConfigPoolCondition(&pool.Status, *mcpCondition)
	}
}

// Determine if we have a config change.
func isPoolConfigChange(oldPool, curPool *mcfgv1.MachineConfigPool) bool {
	return oldPool.Spec.Configuration.Name != curPool.Spec.Configuration.Name
}

// Determine if we have an image pullspec label.
// NOTE(review): this reads pool.Labels using a constant whose name says
// "AnnotationKey" — confirm whether a label or an annotation is intended.
func hasImagePullspecLabel(pool *mcfgv1.MachineConfigPool) bool {
	imagePullspecLabel, ok := pool.Labels[ctrlcommon.ExperimentalNewestLayeredImageEquivalentConfigAnnotationKey]
	return imagePullspecLabel != "" && ok
}

// Check our pool state to see if we have a build in progress or a failed build.
// NOTE(review): despite the name, this returns true only when NONE of
// Building/BuildPending/BuildFailed are true — i.e. when a new build is
// permissible.
func isPoolConditionBuild(pool *mcfgv1.MachineConfigPool) bool {
	return !mcfgv1.IsMachineConfigPoolConditionTrue(pool.Status.Conditions, mcfgv1.MachineConfigPoolBuilding) &&
		!mcfgv1.IsMachineConfigPoolConditionTrue(pool.Status.Conditions, mcfgv1.MachineConfigPoolBuildPending) &&
		!mcfgv1.IsMachineConfigPoolConditionTrue(pool.Status.Conditions, mcfgv1.MachineConfigPoolBuildFailed)
}

// Determines if we should do a build based upon the state of our
// MachineConfigPool, the presence of a build pod, etc.
func shouldWeDoABuild(kubeclient clientset.Interface, oldPool, curPool *mcfgv1.MachineConfigPool) (bool, error) {
	// If we don't have a layered pool, we should not build.
	poolStateSuggestsBuild := ctrlcommon.IsLayeredPool(curPool) &&
		// If our pool state indicates that we do not have a build in progress, then
		// we should do a build.
		isPoolConditionBuild(curPool) &&
		// If we have a config change or we're missing an image pullspec label, we
		// should do a build.
		(isPoolConfigChange(oldPool, curPool) || !hasImagePullspecLabel(curPool)) &&
		// If we're missing a build pod reference, it likely means we don't need to
		// do a build.
		!machineConfigPoolHasBuildPodRef(curPool)

	if !poolStateSuggestsBuild {
		return false, nil
	}

	// Look for a build pod.
	_, err := kubeclient.CoreV1().Pods(ctrlcommon.MCONamespace).Get(context.TODO(), getBuildPodName(curPool), metav1.GetOptions{})

	// If we have an error and it's not because the build pod was not found, return said error.
	if err != nil && !k8serrors.IsNotFound(err) {
		return false, err
	}

	// Build only if no build pod currently exists for this config.
	return k8serrors.IsNotFound(err), nil
}

// Enumerates all of the build-related MachineConfigPool condition types.
func getMachineConfigPoolBuildConditions() []mcfgv1.MachineConfigPoolConditionType {
	return []mcfgv1.MachineConfigPoolConditionType{
		mcfgv1.MachineConfigPoolBuildFailed,
		mcfgv1.MachineConfigPoolBuildPending,
		mcfgv1.MachineConfigPoolBuildSuccess,
		mcfgv1.MachineConfigPoolBuilding,
	}
}

// Creates a new build pod object.
// NOTE(review): the PodSpec is empty — presumably a placeholder until the
// build pod's containers are defined; confirm before relying on this.
func newBuildPod(pool *mcfgv1.MachineConfigPool) *corev1.Pod {
	return &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      getBuildPodName(pool),
			Namespace: ctrlcommon.MCONamespace,
			Labels: map[string]string{
				ctrlcommon.OSImageBuildPodLabel: "",
				targetMachineConfigPoolLabel:    pool.Name,
				desiredConfigLabel:              pool.Spec.Configuration.Name,
			},
		},
		Spec: corev1.PodSpec{},
	}
}

// Determines if a pod is a build pod by examining its labels.
+func isBuildPod(pod *corev1.Pod) bool { + requiredLabels := []string{ + ctrlcommon.OSImageBuildPodLabel, + targetMachineConfigPoolLabel, + desiredConfigLabel, + } + + for _, label := range requiredLabels { + if _, ok := pod.Labels[label]; !ok { + return false + } + } + + return true +} diff --git a/pkg/controller/build/build_controller_fake_client.go b/pkg/controller/build/build_controller_fake_client.go new file mode 100644 index 0000000000..303ce5723c --- /dev/null +++ b/pkg/controller/build/build_controller_fake_client.go @@ -0,0 +1,187 @@ +package build + +// This code is copy/pasted from +// https://raw.githubusercontent.com/openshift/client-go/master/build/clientset/versioned/fake/clientset_generated.go. +// All of this is necessary because the Instantiate() method on the official +// generated FakeClient panics because of an incorrect type assertion :P. + +import ( + "context" + "fmt" + + "github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/fake" + + fakebuildclient "github.com/openshift/client-go/build/clientset/versioned/fake" + + v1 "github.com/openshift/client-go/build/clientset/versioned/typed/build/v1" + + buildv1 "github.com/openshift/api/build/v1" + applyconfigurationsbuildv1 "github.com/openshift/client-go/build/applyconfigurations/build/v1" + clientset "github.com/openshift/client-go/build/clientset/versioned" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/selection" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/discovery" + rest "k8s.io/client-go/rest" + "k8s.io/client-go/testing" +) + +var _ v1.BuildV1Interface = &wrappedFakeBuildClient{} +var _ v1.BuildConfigInterface = &wrappedFakeBuildConfigs{} +var _ clientset.Interface = &wrappedFakeBuildClientset{} + +type wrappedFakeBuildClientset struct { + cs clientset.Interface + buildv1Client 
*wrappedFakeBuildClient +} + +func newWrappedFakeBuildClientset(obj ...runtime.Object) *wrappedFakeBuildClientset { + return &wrappedFakeBuildClientset{ + cs: fakebuildclient.NewSimpleClientset(obj...), + buildv1Client: newWrappedFakeBuildClient(obj...), + } +} + +func (c *wrappedFakeBuildClientset) Discovery() discovery.DiscoveryInterface { + return c.cs.Discovery() +} + +func (c *wrappedFakeBuildClientset) BuildV1() v1.BuildV1Interface { + return c.buildv1Client +} + +type wrappedFakeBuildClient struct { + client v1.BuildV1Interface +} + +func newWrappedFakeBuildClient(obj ...runtime.Object) *wrappedFakeBuildClient { + return &wrappedFakeBuildClient{ + client: fakebuildclient.NewSimpleClientset(obj...).BuildV1(), + } +} + +func (w *wrappedFakeBuildClient) Builds(namespace string) v1.BuildInterface { + return w.client.Builds(namespace) +} + +func (w *wrappedFakeBuildClient) BuildConfigs(namespace string) v1.BuildConfigInterface { + return newWrappedFakeBuildConfigs(w.client, namespace) +} + +func (w *wrappedFakeBuildClient) RESTClient() rest.Interface { + return w.client.RESTClient() +} + +type wrappedFakeBuildConfigs struct { + parentClient v1.BuildV1Interface + client *fake.FakeBuildConfigs + namespace string +} + +func newWrappedFakeBuildConfigs(client v1.BuildV1Interface, namespace string) *wrappedFakeBuildConfigs { + return &wrappedFakeBuildConfigs{ + parentClient: client, + client: client.BuildConfigs(namespace).(*fake.FakeBuildConfigs), + namespace: namespace, + } +} + +func (c *wrappedFakeBuildConfigs) Get(ctx context.Context, name string, options metav1.GetOptions) (result *buildv1.BuildConfig, err error) { + return c.client.Get(ctx, name, options) +} + +func (c *wrappedFakeBuildConfigs) List(ctx context.Context, opts metav1.ListOptions) (result *buildv1.BuildConfigList, err error) { + return c.client.List(ctx, opts) +} + +func (c *wrappedFakeBuildConfigs) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return 
c.client.Watch(ctx, opts) +} + +func (c *wrappedFakeBuildConfigs) Create(ctx context.Context, buildConfig *buildv1.BuildConfig, opts metav1.CreateOptions) (result *buildv1.BuildConfig, err error) { + return c.client.Create(ctx, buildConfig, opts) +} + +func (c *wrappedFakeBuildConfigs) Update(ctx context.Context, buildConfig *buildv1.BuildConfig, opts metav1.UpdateOptions) (result *buildv1.BuildConfig, err error) { + return c.client.Update(ctx, buildConfig, opts) +} + +func (c *wrappedFakeBuildConfigs) UpdateStatus(ctx context.Context, buildConfig *buildv1.BuildConfig, opts metav1.UpdateOptions) (*buildv1.BuildConfig, error) { + return c.client.UpdateStatus(ctx, buildConfig, opts) +} + +func (c *wrappedFakeBuildConfigs) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(ctx, name, opts) +} + +func (c *wrappedFakeBuildConfigs) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listopts metav1.ListOptions) error { + return c.client.DeleteCollection(ctx, opts, listopts) +} + +func (c *wrappedFakeBuildConfigs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *buildv1.BuildConfig, err error) { + return c.client.Patch(ctx, name, pt, data, opts, subresources...) 
+} + +func (c *wrappedFakeBuildConfigs) Apply(ctx context.Context, buildConfig *applyconfigurationsbuildv1.BuildConfigApplyConfiguration, opts metav1.ApplyOptions) (result *buildv1.BuildConfig, err error) { + return c.client.Apply(ctx, buildConfig, opts) +} + +func (c *wrappedFakeBuildConfigs) ApplyStatus(ctx context.Context, buildConfig *applyconfigurationsbuildv1.BuildConfigApplyConfiguration, opts metav1.ApplyOptions) (result *buildv1.BuildConfig, err error) { + return c.client.ApplyStatus(ctx, buildConfig, opts) +} + +// All of this is necessary because this method doesn't work as it should in the officially-generated FakeClient :P +func (c *wrappedFakeBuildConfigs) Instantiate(ctx context.Context, buildConfigName string, buildRequest *buildv1.BuildRequest, opts metav1.CreateOptions) (result *buildv1.Build, err error) { + buildconfigsResource := schema.GroupVersionResource{Group: "build.openshift.io", Version: "v1", Resource: "buildconfigs"} + + obj, err := c.client.Fake. + Invokes(testing.NewCreateSubresourceAction(buildconfigsResource, buildConfigName, "instantiate", c.namespace, buildRequest), &buildv1.Build{}) + + if obj == nil { + return nil, err + } + + buildConfig, err := c.Get(ctx, buildConfigName, metav1.GetOptions{}) + if err != nil { + return nil, err + } + + namespace := buildConfig.Namespace + if namespace == "" { + namespace = c.namespace + } + + // We're looking for builds that belong to this buildconfig, so craft a filter. 
+ ourBuildReq, err := labels.NewRequirement("buildconfig", selection.In, []string{buildConfig.Name}) + if err != nil { + return nil, err + } + + builds, err := c.parentClient.Builds(namespace).List(ctx, metav1.ListOptions{ + LabelSelector: labels.NewSelector().Add(*ourBuildReq).String(), + }) + + if err != nil { + return nil, err + } + + b := &buildv1.Build{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: fmt.Sprintf("%s-%d", buildConfig.Name, len(builds.Items)+1), + Labels: map[string]string{ + "buildconfig": buildConfigName, + }, + }, + Spec: buildv1.BuildSpec{ + CommonSpec: buildConfig.Spec.CommonSpec, + TriggeredBy: buildRequest.TriggeredBy, + }, + } + + return c.parentClient.Builds(namespace).Create(ctx, b, opts) +} diff --git a/pkg/controller/build/build_controller_fake_client_test.go b/pkg/controller/build/build_controller_fake_client_test.go new file mode 100644 index 0000000000..28d7fe0298 --- /dev/null +++ b/pkg/controller/build/build_controller_fake_client_test.go @@ -0,0 +1,65 @@ +package build + +import ( + "context" + "fmt" + "testing" + + buildv1 "github.com/openshift/api/build/v1" + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestWrappedFakeClient(t *testing.T) { + t.Parallel() + + namespace := "a-namespace" + buildConfigName := "a-build-config" + + dockerfile := "FROM scratch" + + buildConfig := &buildv1.BuildConfig{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: buildConfigName, + }, + Spec: buildv1.BuildConfigSpec{ + CommonSpec: buildv1.CommonSpec{ + Source: buildv1.BuildSource{ + Type: buildv1.BuildSourceDockerfile, + Dockerfile: &dockerfile, + }, + }, + }, + } + + client := newWrappedFakeBuildClientset(buildConfig) + + buildRequest := &buildv1.BuildRequest{ + ObjectMeta: metav1.ObjectMeta{Name: buildConfigName}, + TriggeredBy: []buildv1.BuildTriggerCause{ + {Message: "Unit test"}, + }, + } + + // Test that the FakeClient correctly increments the builds. 
+ buildCount := 10 + for i := 1; i <= buildCount; i++ { + build, err := client.BuildV1().BuildConfigs(namespace).Instantiate(context.TODO(), buildRequest.Name, buildRequest, metav1.CreateOptions{}) + assert.NoError(t, err) + assert.NotNil(t, build) + assert.Equal(t, buildConfig.Spec.CommonSpec, build.Spec.CommonSpec) + assert.Equal(t, buildRequest.TriggeredBy, build.Spec.TriggeredBy) + + buildName := fmt.Sprintf("%s-%d", buildConfigName, i) + fetchedBuild, err := client.BuildV1().Builds(namespace).Get(context.TODO(), buildName, metav1.GetOptions{}) + assert.NoError(t, err) + assert.NotNil(t, fetchedBuild) + assert.Equal(t, buildConfig.Spec.CommonSpec, fetchedBuild.Spec.CommonSpec) + assert.Equal(t, buildRequest.TriggeredBy, fetchedBuild.Spec.TriggeredBy) + } + + buildList, err := client.BuildV1().Builds(namespace).List(context.TODO(), metav1.ListOptions{}) + assert.NoError(t, err) + assert.Len(t, buildList.Items, buildCount) +} diff --git a/pkg/controller/build/build_controller_test.go b/pkg/controller/build/build_controller_test.go new file mode 100644 index 0000000000..127625b6a9 --- /dev/null +++ b/pkg/controller/build/build_controller_test.go @@ -0,0 +1,503 @@ +package build + +import ( + "context" + "fmt" + "time" + + imageclientset "github.com/openshift/client-go/image/clientset/versioned" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + buildclientset "github.com/openshift/client-go/build/clientset/versioned" + + mcfgv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" + mcfgclientset "github.com/openshift/machine-config-operator/pkg/generated/clientset/versioned" + fakeclientmachineconfigv1 "github.com/openshift/machine-config-operator/pkg/generated/clientset/versioned/fake" + mcfgv1informers "github.com/openshift/machine-config-operator/pkg/generated/informers/externalversions" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + coreinformers "k8s.io/client-go/informers" + clientset 
"k8s.io/client-go/kubernetes" + fakecorev1client "k8s.io/client-go/kubernetes/fake" + + corev1 "k8s.io/api/core/v1" + + ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" + + "testing" +) + +// Holds hte fake clients used for running the BuildController tests. +type buildControllerClientset struct { + ImageClient imageclientset.Interface + BuildClient buildclientset.Interface + KubeClient clientset.Interface + McfgClient mcfgclientset.Interface +} + +// Holds a name and function to implement a given BuildController test. +type buildControllerTestCase struct { + name string + testFunc func(context.Context, *testing.T, *buildControllerClientset) +} + +// Instantiates all of the initial objects and starts the BuildController. +func (b *buildControllerTestCase) startBuildController(ctx context.Context, t *testing.T) *buildControllerClientset { + bcCtx, bcCtxCancel := context.WithCancel(ctx) + t.Cleanup(bcCtxCancel) + + kubeClient := fakecorev1client.NewSimpleClientset( + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "etc-pki-entitlement", + Namespace: "openshift-config-managed", + }, + Data: map[string][]byte{ + "entitlement-key.pem": []byte("abc"), + "entitlement.pem": []byte("123"), + }, + }, + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machine-config-operator", + Namespace: ctrlcommon.MCONamespace, + }, + }, + ) + + fakeMCclient := fakeclientmachineconfigv1.NewSimpleClientset( + newMachineConfigPool("master", "rendered-master-1"), + newMachineConfigPool("worker", "rendered-worker-1"), + &mcfgv1.ControllerConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machine-config-controller", + }, + }) + + ccInformer := mcfgv1informers.NewSharedInformerFactory(fakeMCclient, 0) + mcpInformer := mcfgv1informers.NewSharedInformerFactory(fakeMCclient, 0) + coreInformer := coreinformers.NewSharedInformerFactoryWithOptions(kubeClient, 0, coreinformers.WithNamespace(ctrlcommon.MCONamespace)) + + config := BuildControllerConfig{ + 
MaxRetries: 5, + UpdateDelay: time.Millisecond * 100, + } + + ctrl := New( + config, + coreInformer.Core().V1().Pods(), + ccInformer.Machineconfiguration().V1().ControllerConfigs(), + mcpInformer.Machineconfiguration().V1().MachineConfigPools(), + fakeMCclient, + kubeClient, + ) + + coreInformer.Start(bcCtx.Done()) + ccInformer.Start(bcCtx.Done()) + mcpInformer.Start(bcCtx.Done()) + + go ctrl.Run(5, bcCtx.Done()) + + return &buildControllerClientset{ + KubeClient: kubeClient, + McfgClient: fakeMCclient, + } +} + +// Runs the attached test function in parallel. +func (b *buildControllerTestCase) run(ctx context.Context, t *testing.T) { + t.Run(b.name, func(t *testing.T) { + t.Parallel() + t.Cleanup(func() { + // dumpObjects(ctx, t, bcc, strings.ToLower(strings.ReplaceAll(t.Name(), "/", "_"))) + }) + + testCtx, testCtxCancel := context.WithTimeout(ctx, time.Second*15) + t.Cleanup(testCtxCancel) + + bcc := b.startBuildController(testCtx, t) + b.testFunc(ctx, t, bcc) + }) +} + +// Helper that determines if the build is a success. +func isMCPBuildSuccess(mcp *mcfgv1.MachineConfigPool) bool { + configAnnotation, hasConfigAnnotation := mcp.Labels[ctrlcommon.ExperimentalNewestLayeredImageEquivalentConfigAnnotationKey] + + expectedConfigAnnotation := "new-image-pullspec" + + return hasConfigAnnotation && + ctrlcommon.IsLayeredPool(mcp) && + configAnnotation == expectedConfigAnnotation && + mcfgv1.IsMachineConfigPoolConditionTrue(mcp.Status.Conditions, mcfgv1.MachineConfigPoolBuildSuccess) && + !machineConfigPoolHasBuildPodRef(mcp) +} + +// Opts a given MachineConfigPool into layering and asserts that the MachineConfigPool reaches the desired state. 
+func testOptInMCP(ctx context.Context, t *testing.T, bcc *buildControllerClientset, poolName string) { + mcp := optInMCP(ctx, t, bcc, poolName) + assertMCPFollowsBuildPodStatus(ctx, t, bcc, mcp, corev1.PodSucceeded) + assertMachineConfigPoolReachesState(ctx, t, bcc, poolName, isMCPBuildSuccess) +} + +// Mutates all MachineConfigPools that are not opted in to ensure they are ignored. +func testNoMCPsOptedIn(ctx context.Context, t *testing.T, bcc *buildControllerClientset) { + // Set an unrelated label to force a sync. + mcpList, err := bcc.McfgClient.MachineconfigurationV1().MachineConfigPools().List(ctx, metav1.ListOptions{}) + require.NoError(t, err) + + for _, mcp := range mcpList.Items { + mcp := mcp + mcp.Labels["a-label-key"] = "" + _, err := bcc.McfgClient.MachineconfigurationV1().MachineConfigPools().Update(ctx, &mcp, metav1.UpdateOptions{}) + require.NoError(t, err) + } + + mcpList, err = bcc.McfgClient.MachineconfigurationV1().MachineConfigPools().List(ctx, metav1.ListOptions{}) + require.NoError(t, err) + + for _, mcp := range mcpList.Items { + mcp := mcp + assert.False(t, ctrlcommon.IsLayeredPool(&mcp)) + assert.NotContains(t, mcp.Labels, ctrlcommon.ExperimentalNewestLayeredImageEquivalentConfigAnnotationKey) + } +} + +// Opts in a single MachineConfigPool and ensures that it reaches the desired state. +func testSingleMCPOptedIn(ctx context.Context, t *testing.T, bcc *buildControllerClientset) { + testOptInMCP(ctx, t, bcc, "worker") +} + +// Opts multiple MachineConfigPools in and ensures that they reach the desired states. +func testMultipleMCPsOptedIn(ctx context.Context, t *testing.T, bcc *buildControllerClientset) { + poolNames := []string{ + "master", + "worker", + } + + for _, poolName := range poolNames { + poolName := poolName + + t.Run(poolName, func(t *testing.T) { + t.Parallel() + testOptInMCP(ctx, t, bcc, poolName) + }) + } +} + +// Tests that a failed build degrades the target MachineConfigPool. 
+func testMCPBuildFailure(ctx context.Context, t *testing.T, bcc *buildControllerClientset, poolName string) { + mcp := optInMCP(ctx, t, bcc, poolName) + + assertMCPFollowsBuildPodStatus(ctx, t, bcc, mcp, corev1.PodFailed) + + assertMachineConfigPoolReachesState(ctx, t, bcc, poolName, func(mcp *mcfgv1.MachineConfigPool) bool { + return mcfgv1.IsMachineConfigPoolConditionTrue(mcp.Status.Conditions, mcfgv1.MachineConfigPoolBuildFailed) && + mcfgv1.IsMachineConfigPoolConditionTrue(mcp.Status.Conditions, mcfgv1.MachineConfigPoolDegraded) + }) +} + +// Tests that a failed build degrades the target MachineConfigPool. +func testSingleMCPBuildFailure(ctx context.Context, t *testing.T, bcc *buildControllerClientset) { + testMCPBuildFailure(ctx, t, bcc, "worker") +} + +// Tests that a failed build degrades all target MachineConfigPools. +func testMultipleMCPBuildFailures(ctx context.Context, t *testing.T, bcc *buildControllerClientset) { + poolNames := []string{ + "master", + "worker", + } + + for _, poolName := range poolNames { + poolName := poolName + + t.Run(poolName, func(t *testing.T) { + testMCPBuildFailure(ctx, t, bcc, poolName) + }) + } +} + +// Tests that multiple configs are serially rolled out to the target +// MachineConfigPool and ensures that each config is rolled out before moving +// onto the next one. 
+func testMultipleConfigsAreRolledOutDriver(ctx context.Context, t *testing.T, bcc *buildControllerClientset, poolName string) { + for i := 1; i < 10; i++ { + config := fmt.Sprintf("rendered-%s-%d", poolName, i) + + t.Run(config, func(t *testing.T) { + workerMCP, err := bcc.McfgClient.MachineconfigurationV1().MachineConfigPools().Get(ctx, poolName, metav1.GetOptions{}) + require.NoError(t, err) + + workerMCP.Spec.Configuration.Name = config + + _, err = bcc.McfgClient.MachineconfigurationV1().MachineConfigPools().Update(ctx, workerMCP, metav1.UpdateOptions{}) + require.NoError(t, err) + testOptInMCP(ctx, t, bcc, poolName) + + assertMachineConfigPoolReachesState(ctx, t, bcc, poolName, func(mcp *mcfgv1.MachineConfigPool) bool { + return mcp.Spec.Configuration.Name == config && isMCPBuildSuccess(mcp) + }) + }) + } +} + +// Tests that multiple configs are serially rolled out to the target +// MachineConfigPool and ensures that each config is rolled out before moving +// onto the next one. +func testMultipleConfigsAreRolledOut(ctx context.Context, t *testing.T, bcc *buildControllerClientset) { + testMultipleConfigsAreRolledOutDriver(ctx, t, bcc, "worker") +} + +// Tests that multiple configs are serially rolled out to all +// MachineConfigPools and ensures that each config is rolled out before moving +// onto the next one. Note: This is parallelized across all of the +// MachineConfigPools. +func testMultipleConfigsAreRolledOutToAllMCPs(ctx context.Context, t *testing.T, bcc *buildControllerClientset) { + poolNames := []string{ + "master", + "worker", + } + + for _, poolName := range poolNames { + poolName := poolName + t.Run(poolName, func(t *testing.T) { + t.Parallel() + testMultipleConfigsAreRolledOutDriver(ctx, t, bcc, poolName) + }) + } +} + +// Tests that an opted-in MachineConfigPool is able to opt back out. 
+func testOptedInMCPOptsOut(ctx context.Context, t *testing.T, bcc *buildControllerClientset) { + testOptInMCP(ctx, t, bcc, "worker") + + optOutMCP(ctx, t, bcc, "worker") + + assertMachineConfigPoolReachesState(ctx, t, bcc, "worker", func(mcp *mcfgv1.MachineConfigPool) bool { + layeringLabels := []string{ + ctrlcommon.LayeringEnabledPoolLabel, + } + + for _, label := range layeringLabels { + if _, ok := mcp.Labels[label]; ok { + return false + } + } + + for _, condition := range getMachineConfigPoolBuildConditions() { + if mcfgv1.IsMachineConfigPoolConditionPresentAndEqual(mcp.Status.Conditions, condition, corev1.ConditionTrue) || + mcfgv1.IsMachineConfigPoolConditionPresentAndEqual(mcp.Status.Conditions, condition, corev1.ConditionFalse) { + return false + } + } + + return !machineConfigPoolHasBuildPodRef(mcp) + }) +} + +// Tests that if a MachineConfigPool is degraded, that a build pod is not created. +func testMcpIsDegraded(ctx context.Context, t *testing.T, bcc *buildControllerClientset) { + mcp, err := bcc.McfgClient.MachineconfigurationV1().MachineConfigPools().Get(ctx, "worker", metav1.GetOptions{}) + require.NoError(t, err) + + mcp.Labels = map[string]string{ + ctrlcommon.LayeringEnabledPoolLabel: "", + } + + condition := mcfgv1.NewMachineConfigPoolCondition(mcfgv1.MachineConfigPoolDegraded, corev1.ConditionTrue, "", "") + mcfgv1.SetMachineConfigPoolCondition(&mcp.Status, *condition) + + _, err = bcc.McfgClient.MachineconfigurationV1().MachineConfigPools().Update(ctx, mcp, metav1.UpdateOptions{}) + require.NoError(t, err) + + assertMachineConfigPoolReachesState(ctx, t, bcc, "worker", func(mcp *mcfgv1.MachineConfigPool) bool { + // TODO: Should we fail the build without even starting it if the pool is degraded? 
+ for _, condition := range getMachineConfigPoolBuildConditions() { + if mcfgv1.IsMachineConfigPoolConditionTrue(mcp.Status.Conditions, condition) { + return false + } + } + + return mcfgv1.IsMachineConfigPoolConditionTrue(mcp.Status.Conditions, mcfgv1.MachineConfigPoolDegraded) && + assertNoBuildPods(ctx, t, bcc) + }) +} + +// Tests that a label update or similar does not cause a build to occur. +func testBuiltPoolGetsUnrelatedUpdate(ctx context.Context, t *testing.T, bcc *buildControllerClientset) { + testOptInMCP(ctx, t, bcc, "worker") + + pool, err := bcc.McfgClient.MachineconfigurationV1().MachineConfigPools().Get(ctx, "worker", metav1.GetOptions{}) + require.NoError(t, err) + + pool.Annotations["unrelated-annotation"] = "hello" + pool.Labels["unrelated-label"] = "" + _, err = bcc.McfgClient.MachineconfigurationV1().MachineConfigPools().Update(ctx, pool, metav1.UpdateOptions{}) + require.NoError(t, err) + + assertMachineConfigPoolReachesState(ctx, t, bcc, "worker", func(mcp *mcfgv1.MachineConfigPool) bool { + return assert.Equal(t, mcp.Status.Conditions, pool.Status.Conditions) && + assertNoBuildPods(ctx, t, bcc) + }) +} + +// Tests all of the major BuildController functionalities. 
+func TestBuildController(t *testing.T) { + t.Parallel() + + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + t.Cleanup(cancel) + + testCases := []buildControllerTestCase{ + { + name: "No MCPs opted in", + testFunc: testNoMCPsOptedIn, + }, + { + name: "Single MCP opted in", + testFunc: testSingleMCPOptedIn, + }, + { + name: "Multiple MCPs opted in", + testFunc: testMultipleMCPsOptedIn, + }, + { + name: "Single MCP build failure", + testFunc: testSingleMCPBuildFailure, + }, + { + name: "Multiple MCP build failures", + testFunc: testMultipleMCPBuildFailures, + }, + { + name: "Multiple configs are rolled out to single MCP", + testFunc: testMultipleConfigsAreRolledOut, + }, + { + name: "Multiple configs rolled out to all MCPs", + testFunc: testMultipleConfigsAreRolledOutToAllMCPs, + }, + { + name: "Opted in MCP opts out", + testFunc: testOptedInMCPOptsOut, + }, + { + name: "MCP is degraded", + testFunc: testMcpIsDegraded, + }, + { + name: "Built pool gets unrelated update", + testFunc: testBuiltPoolGetsUnrelatedUpdate, + }, + } + + for _, testCase := range testCases { + testCase := testCase + testCase.run(ctx, t) + } +} + +// Tests if we should do a build for a variety of edge-cases and circumstances. +func TestShouldWeDoABuild(t *testing.T) { + t.Parallel() + + // Mutators which mutate the given MachineConfigPool. 
+ toLayeredPool := func(mcp *mcfgv1.MachineConfigPool) *mcfgv1.MachineConfigPool { + mcp.Labels[ctrlcommon.LayeringEnabledPoolLabel] = "" + return mcp + } + + toLayeredPoolWithImagePullspec := func(mcp *mcfgv1.MachineConfigPool) *mcfgv1.MachineConfigPool { + mcp = toLayeredPool(mcp) + mcp.Labels[ctrlcommon.ExperimentalNewestLayeredImageEquivalentConfigAnnotationKey] = "image-pullspec" + return mcp + } + + toLayeredPoolWithConditionsSet := func(mcp *mcfgv1.MachineConfigPool, conditions []mcfgv1.MachineConfigPoolCondition) *mcfgv1.MachineConfigPool { + mcp = toLayeredPoolWithImagePullspec(mcp) + setMCPBuildConditions(mcp, conditions) + return mcp + } + + type shouldWeBuildTestCase struct { + name string + oldPool *mcfgv1.MachineConfigPool + curPool *mcfgv1.MachineConfigPool + buildPod *corev1.Pod + expected bool + } + + testCases := []shouldWeBuildTestCase{ + { + name: "Non-layered pool", + oldPool: newMachineConfigPool("worker", "rendered-worker-1"), + curPool: newMachineConfigPool("worker", "rendered-worker-1"), + expected: false, + }, + { + name: "Layered pool config change with missing image pullspec", + oldPool: toLayeredPool(newMachineConfigPool("worker", "rendered-worker-1")), + curPool: toLayeredPool(newMachineConfigPool("worker", "rendered-worker-2")), + expected: true, + }, + { + name: "Layered pool with no config change and missing image pullspec", + oldPool: toLayeredPool(newMachineConfigPool("worker", "rendered-worker-1")), + curPool: toLayeredPool(newMachineConfigPool("worker", "rendered-worker-1")), + expected: true, + }, + { + name: "Layered pool with image pullspec", + oldPool: toLayeredPoolWithImagePullspec(newMachineConfigPool("worker", "rendered-worker-1")), + curPool: toLayeredPoolWithImagePullspec(newMachineConfigPool("worker", "rendered-worker-1")), + }, + { + name: "Layered pool with build pod", + oldPool: toLayeredPoolWithImagePullspec(newMachineConfigPool("worker", "rendered-worker-1")), + curPool: 
toLayeredPoolWithImagePullspec(newMachineConfigPool("worker", "rendered-worker-1")), + buildPod: newBuildPod(newMachineConfigPool("worker", "rendered-worker-1")), + expected: false, + }, + } + + // Generate additional test cases programmatically. + buildStates := map[mcfgv1.MachineConfigPoolConditionType]string{ + mcfgv1.MachineConfigPoolBuildFailed: "failed", + mcfgv1.MachineConfigPoolBuildPending: "pending", + mcfgv1.MachineConfigPoolBuildSuccess: "successful", + mcfgv1.MachineConfigPoolBuilding: "in progress", + } + + for conditionType, name := range buildStates { + conditions := []mcfgv1.MachineConfigPoolCondition{ + { + Type: conditionType, + Status: corev1.ConditionTrue, + }, + } + + testCases = append(testCases, shouldWeBuildTestCase{ + name: fmt.Sprintf("Layered pool with %s build", name), + oldPool: toLayeredPoolWithConditionsSet(newMachineConfigPool("worker", "rendered-worker-1"), conditions), + curPool: toLayeredPoolWithConditionsSet(newMachineConfigPool("worker", "rendered-worker-1"), conditions), + expected: false, + }) + } + + for _, testCase := range testCases { + testCase := testCase + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + + kubeClient := fakecorev1client.NewSimpleClientset() + if testCase.buildPod != nil { + kubeClient = fakecorev1client.NewSimpleClientset(testCase.buildPod) + } + + doABuild, err := shouldWeDoABuild(kubeClient, testCase.oldPool, testCase.curPool) + assert.NoError(t, err) + assert.Equal(t, testCase.expected, doABuild) + }) + } +} diff --git a/pkg/controller/build/helpers_test.go b/pkg/controller/build/helpers_test.go new file mode 100644 index 0000000000..76423dc077 --- /dev/null +++ b/pkg/controller/build/helpers_test.go @@ -0,0 +1,295 @@ +package build + +import ( + "context" + "fmt" + "io/ioutil" + "testing" + "time" + + "github.com/ghodss/yaml" + mcfgv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" + ctrlcommon 
"github.com/openshift/machine-config-operator/pkg/controller/common" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" +) + +// Creates a simple MachineConfigPool object for testing. +func newMachineConfigPool(name string, params ...string) *mcfgv1.MachineConfigPool { + renderedConfigName := "" + if len(params) >= 1 { + renderedConfigName = params[0] + } else { + renderedConfigName = fmt.Sprintf("rendered-%s-1", name) + } + + cfg := mcfgv1.MachineConfigPoolStatusConfiguration{ + ObjectReference: corev1.ObjectReference{ + Name: renderedConfigName, + }, + Source: []corev1.ObjectReference{ + { + Name: name + "-config-1", + Kind: "MachineConfig", + }, + { + Name: name + "-config-2", + Kind: "MachineConfig", + }, + { + Name: name + "-config-3", + Kind: "MachineConfig", + }, + }, + } + + return &mcfgv1.MachineConfigPool{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Annotations: map[string]string{}, + Labels: map[string]string{}, + }, + Spec: mcfgv1.MachineConfigPoolSpec{ + Configuration: cfg, + }, + Status: mcfgv1.MachineConfigPoolStatus{ + Configuration: cfg, + Conditions: []mcfgv1.MachineConfigPoolCondition{}, + }, + } +} + +// Opts a MachineConfigPool into layering. +func optInMCP(ctx context.Context, t *testing.T, bcc *buildControllerClientset, poolName string) *mcfgv1.MachineConfigPool { + t.Helper() + + mcp, err := bcc.McfgClient.MachineconfigurationV1().MachineConfigPools().Get(ctx, poolName, metav1.GetOptions{}) + require.NoError(t, err) + + mcp.Labels = map[string]string{ + ctrlcommon.LayeringEnabledPoolLabel: "", + } + + mcp, err = bcc.McfgClient.MachineconfigurationV1().MachineConfigPools().Update(ctx, mcp, metav1.UpdateOptions{}) + require.NoError(t, err) + + return mcp +} + +// Opts a MachineConfigPool out of layering. 
+func optOutMCP(ctx context.Context, t *testing.T, bcc *buildControllerClientset, poolName string) { + t.Helper() + + mcp, err := bcc.McfgClient.MachineconfigurationV1().MachineConfigPools().Get(ctx, poolName, metav1.GetOptions{}) + require.NoError(t, err) + + delete(mcp.Labels, ctrlcommon.LayeringEnabledPoolLabel) + + _, err = bcc.McfgClient.MachineconfigurationV1().MachineConfigPools().Update(ctx, mcp, metav1.UpdateOptions{}) + require.NoError(t, err) +} + +// Polls until a MachineConfigPool reaches a desired state. +func assertMachineConfigPoolReachesState(ctx context.Context, t *testing.T, bcc *buildControllerClientset, poolName string, checkFunc func(*mcfgv1.MachineConfigPool) bool) bool { + t.Helper() + + pollCtx, cancel := context.WithTimeout(ctx, time.Second*10) + t.Cleanup(cancel) + + err := wait.PollImmediateUntilWithContext(pollCtx, time.Millisecond, func(c context.Context) (bool, error) { + mcp, err := bcc.McfgClient.MachineconfigurationV1().MachineConfigPools().Get(c, poolName, metav1.GetOptions{}) + if err != nil { + return false, err + } + + return checkFunc(mcp), nil + }) + + return assert.NoError(t, err, "MachineConfigPool %s never reached desired state", poolName) +} + +// Asserts that there are no build pods. +func assertNoBuildPods(ctx context.Context, t *testing.T, bcc *buildControllerClientset) bool { + t.Helper() + + foundBuildPods := false + + buildPodNames := []string{} + + podList, err := bcc.KubeClient.CoreV1().Pods(ctrlcommon.MCONamespace).List(ctx, metav1.ListOptions{}) + require.NoError(t, err) + + for _, pod := range podList.Items { + pod := pod + if isBuildPod(&pod) { + foundBuildPods = true + buildPodNames = append(buildPodNames, pod.Name) + } + } + + return assert.False(t, foundBuildPods, "expected not to find build pods, found: %v", buildPodNames) +} + +// Polls until a build pod is created. 
+func assertBuildPodIsCreated(ctx context.Context, t *testing.T, bcc *buildControllerClientset, buildPodName string) bool { + t.Helper() + + err := wait.PollImmediateInfiniteWithContext(ctx, time.Millisecond, func(ctx context.Context) (bool, error) { + podList, err := bcc.KubeClient.CoreV1().Pods(ctrlcommon.MCONamespace).List(ctx, metav1.ListOptions{}) + if err != nil { + return false, err + } + + for _, pod := range podList.Items { + if pod.Name == buildPodName { + return true, nil + } + } + + return false, nil + }) + + return assert.NoError(t, err, "build pod %s was not created", buildPodName) +} + +// Simulates a pod being scheduled and reaching various states. Verifies that +// the target MachineConfigPool reaches the expected states as it goes. +func assertMCPFollowsBuildPodStatus(ctx context.Context, t *testing.T, bcc *buildControllerClientset, mcp *mcfgv1.MachineConfigPool, endingPhase corev1.PodPhase) (outcome bool) { + t.Helper() + + defer func() { + assert.True(t, outcome) + }() + + // Each of the various pod phases we're interested in. + podPhases := []corev1.PodPhase{ + corev1.PodPending, + corev1.PodRunning, + endingPhase, + } + + // Each pod phase is correllated to a MachineConfigPoolConditionType. + podPhaseToMCPCondition := map[corev1.PodPhase]mcfgv1.MachineConfigPoolConditionType{ + corev1.PodPending: mcfgv1.MachineConfigPoolBuildPending, + corev1.PodRunning: mcfgv1.MachineConfigPoolBuilding, + corev1.PodFailed: mcfgv1.MachineConfigPoolBuildFailed, + corev1.PodSucceeded: mcfgv1.MachineConfigPoolBuildSuccess, + } + + // Determine if the MachineConfigPool should have a reference to the build pod. + shouldHaveBuildPodRef := map[corev1.PodPhase]bool{ + corev1.PodPending: true, + corev1.PodRunning: true, + corev1.PodFailed: true, + corev1.PodSucceeded: false, + } + + buildPodName := getBuildPodName(mcp) + + // Wait for the build pod to be created. 
+ outcome = assertBuildPodIsCreated(ctx, t, bcc, buildPodName) + if !outcome { + return + } + + // Cycle through each of the build pod phases. + for _, phase := range podPhases { + // Get the build pod by name. + buildPod, err := bcc.KubeClient.CoreV1().Pods(ctrlcommon.MCONamespace).Get(ctx, buildPodName, metav1.GetOptions{}) + require.NoError(t, err) + + // Set the pod phase and update it. + buildPod.Status.Phase = phase + _, err = bcc.KubeClient.CoreV1().Pods(ctrlcommon.MCONamespace).Update(ctx, buildPod, metav1.UpdateOptions{}) + require.NoError(t, err) + + // Look up the expected MCP condition for our current pod phase. + expectedMCPCondition := podPhaseToMCPCondition[phase] + + // Look up the expected build pod condition for our current pod phase. + expectedBuildPodRefPresence := shouldHaveBuildPodRef[phase] + + // Wait for the MCP condition to reach the expected state. + outcome = assertMachineConfigPoolReachesState(ctx, t, bcc, mcp.Name, func(mcp *mcfgv1.MachineConfigPool) bool { + return mcfgv1.IsMachineConfigPoolConditionTrue(mcp.Status.Conditions, expectedMCPCondition) && + expectedBuildPodRefPresence == machineConfigPoolHasBuildPodRef(mcp) + }) + + if !outcome { + return false + } + } + + // Find out what happened to the build pod. + _, err := bcc.KubeClient.CoreV1().Pods(ctrlcommon.MCONamespace).Get(ctx, buildPodName, metav1.GetOptions{}) + switch endingPhase { + case corev1.PodSucceeded: + // If the build pod was successful, looking it up should fail because it should have been deleted. + outcome = assert.Error(t, err) + case corev1.PodFailed: + // If the build pod failed, looking it up should succeed since we leave it around for debugging. + outcome = assert.NoError(t, err) + } + + return +} + +// Dumps all the objects within each of the fake clients to a YAML file for easy debugging. 
+func dumpObjects(ctx context.Context, t *testing.T, bcc *buildControllerClientset, filenamePrefix string) { + if bcc.ImageClient != nil { + images, err := bcc.ImageClient.ImageV1().Images().List(ctx, metav1.ListOptions{}) + require.NoError(t, err) + + dumpToYAMLFile(t, images, filenamePrefix+"-images.yaml") + + imagestreams, err := bcc.ImageClient.ImageV1().ImageStreams(ctrlcommon.MCONamespace).List(ctx, metav1.ListOptions{}) + require.NoError(t, err) + + dumpToYAMLFile(t, imagestreams, filenamePrefix+"-imagestreams.yaml") + + imagestreamtags, err := bcc.ImageClient.ImageV1().ImageStreamTags(ctrlcommon.MCONamespace).List(ctx, metav1.ListOptions{}) + require.NoError(t, err) + + dumpToYAMLFile(t, imagestreamtags, filenamePrefix+"-imagestreamtags.yaml") + } + + if bcc.McfgClient != nil { + mcp, err := bcc.McfgClient.MachineconfigurationV1().MachineConfigPools().List(ctx, metav1.ListOptions{}) + require.NoError(t, err) + + dumpToYAMLFile(t, mcp, filenamePrefix+"-machineconfigpools.yaml") + + machineconfigs, err := bcc.McfgClient.MachineconfigurationV1().MachineConfigs().List(ctx, metav1.ListOptions{}) + require.NoError(t, err) + + dumpToYAMLFile(t, machineconfigs, filenamePrefix+"-machineconfigs.yaml") + } + + if bcc.KubeClient != nil { + pods, err := bcc.KubeClient.CoreV1().Pods(ctrlcommon.MCONamespace).List(ctx, metav1.ListOptions{}) + require.NoError(t, err) + + dumpToYAMLFile(t, pods, filenamePrefix+"-pods.yaml") + } + + if bcc.BuildClient != nil { + buildconfigs, err := bcc.BuildClient.BuildV1().BuildConfigs(ctrlcommon.MCONamespace).List(ctx, metav1.ListOptions{}) + require.NoError(t, err) + + dumpToYAMLFile(t, buildconfigs, filenamePrefix+"-buildconfigs.yaml") + + builds, err := bcc.BuildClient.BuildV1().Builds(ctrlcommon.MCONamespace).List(ctx, metav1.ListOptions{}) + require.NoError(t, err) + dumpToYAMLFile(t, builds, filenamePrefix+"-builds.yaml") + } +} + +func dumpToYAMLFile(t *testing.T, obj interface{}, filename string) { + out, err := yaml.Marshal(obj) + 
require.NoError(t, err) + + require.NoError(t, ioutil.WriteFile(filename, out, 0755)) +} diff --git a/pkg/controller/common/constants.go b/pkg/controller/common/constants.go index 12177c3618..571fc825f2 100644 --- a/pkg/controller/common/constants.go +++ b/pkg/controller/common/constants.go @@ -41,4 +41,13 @@ const ( MachineConfigPoolMaster = "master" // MachineConfigPoolWorker is the MachineConfigPool name given to the worker MachineConfigPoolWorker = "worker" + + // LayeringEnabledPoolLabel is the label that enables the "layered" workflow path for a pool. + LayeringEnabledPoolLabel = "machineconfiguration.openshift.io/layering-enabled" + + // ExperimentalNewestLayeredImageEquivalentConfigAnnotationKey is the annotation that signifies which rendered config the newest layered OS image is equivalent to. + // TODO(zzlotnik): Determine if we should use this still. + ExperimentalNewestLayeredImageEquivalentConfigAnnotationKey = "machineconfiguration.openshift.io/newestImageEquivalentConfig" + + OSImageBuildPodLabel = "machineconfiguration.openshift.io/buildPod" ) diff --git a/pkg/controller/common/helpers.go b/pkg/controller/common/helpers.go index 81fd8d1838..2ef319ea13 100644 --- a/pkg/controller/common/helpers.go +++ b/pkg/controller/common/helpers.go @@ -1129,3 +1129,10 @@ func ReadDir(path string) ([]fs.FileInfo, error) { } return infos, nil } + +func IsLayeredPool(pool *mcfgv1.MachineConfigPool) bool { + if _, ok := pool.Labels[LayeringEnabledPoolLabel]; ok { + return true + } + return false +} diff --git a/pkg/daemon/constants/constants.go b/pkg/daemon/constants/constants.go index aaf272fd71..ce46ad69cf 100644 --- a/pkg/daemon/constants/constants.go +++ b/pkg/daemon/constants/constants.go @@ -8,6 +8,11 @@ const ( // // XXX + // CurrentImageAnnotationKey is used to get the current OS image pullspec for a machine + CurrentImageAnnotationKey = "machineconfiguration.openshift.io/currentImage" + // DesiredImageAnnotationKey is used to specify the desired OS image pullspec for a machine + 
DesiredImageAnnotationKey = "machineconfiguration.openshift.io/desiredImage" + // CurrentMachineConfigAnnotationKey is used to fetch current MachineConfig for a machine CurrentMachineConfigAnnotationKey = "machineconfiguration.openshift.io/currentConfig" // DesiredMachineConfigAnnotationKey is used to specify the desired MachineConfig for a machine diff --git a/vendor/github.com/openshift/client-go/build/clientset/versioned/clientset.go b/vendor/github.com/openshift/client-go/build/clientset/versioned/clientset.go new file mode 100644 index 0000000000..d7c9c69806 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/clientset/versioned/clientset.go @@ -0,0 +1,105 @@ +// Code generated by client-gen. DO NOT EDIT. + +package versioned + +import ( + "fmt" + "net/http" + + buildv1 "github.com/openshift/client-go/build/clientset/versioned/typed/build/v1" + discovery "k8s.io/client-go/discovery" + rest "k8s.io/client-go/rest" + flowcontrol "k8s.io/client-go/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + BuildV1() buildv1.BuildV1Interface +} + +// Clientset contains the clients for groups. Each group has exactly one +// version included in a Clientset. +type Clientset struct { + *discovery.DiscoveryClient + buildV1 *buildv1.BuildV1Client +} + +// BuildV1 retrieves the BuildV1Client +func (c *Clientset) BuildV1() buildv1.BuildV1Interface { + return c.buildV1 +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + if c == nil { + return nil + } + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfig will generate a rate-limiter in configShallowCopy. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). 
+func NewForConfig(c *rest.Config) (*Clientset, error) { + configShallowCopy := *c + + if configShallowCopy.UserAgent == "" { + configShallowCopy.UserAgent = rest.DefaultKubernetesUserAgent() + } + + // share the transport between all clients + httpClient, err := rest.HTTPClientFor(&configShallowCopy) + if err != nil { + return nil, err + } + + return NewForConfigAndClient(&configShallowCopy, httpClient) +} + +// NewForConfigAndClient creates a new Clientset for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfigAndClient will generate a rate-limiter in configShallowCopy. +func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + if configShallowCopy.Burst <= 0 { + return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") + } + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + + var cs Clientset + var err error + cs.buildV1, err = buildv1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + return &cs, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *Clientset { + cs, err := NewForConfig(c) + if err != nil { + panic(err) + } + return cs +} + +// New creates a new Clientset for the given RESTClient. 
+func New(c rest.Interface) *Clientset { + var cs Clientset + cs.buildV1 = buildv1.New(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &cs +} diff --git a/vendor/github.com/openshift/client-go/build/clientset/versioned/doc.go b/vendor/github.com/openshift/client-go/build/clientset/versioned/doc.go new file mode 100644 index 0000000000..0e0c2a8900 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/clientset/versioned/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated clientset. +package versioned diff --git a/vendor/github.com/openshift/client-go/build/clientset/versioned/fake/clientset_generated.go b/vendor/github.com/openshift/client-go/build/clientset/versioned/fake/clientset_generated.go new file mode 100644 index 0000000000..7070b0ed07 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/clientset/versioned/fake/clientset_generated.go @@ -0,0 +1,69 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + clientset "github.com/openshift/client-go/build/clientset/versioned" + buildv1 "github.com/openshift/client-go/build/clientset/versioned/typed/build/v1" + fakebuildv1 "github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/fake" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/discovery" + fakediscovery "k8s.io/client-go/discovery/fake" + "k8s.io/client-go/testing" +) + +// NewSimpleClientset returns a clientset that will respond with the provided objects. +// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any validations and/or defaults. It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. 
+func NewSimpleClientset(objects ...runtime.Object) *Clientset { + o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + cs := &Clientset{tracker: o} + cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} + cs.AddReactor("*", "*", testing.ObjectReaction(o)) + cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) + + return cs +} + +// Clientset implements clientset.Interface. Meant to be embedded into a +// struct to get a default implementation. This makes faking out just the method +// you want to test easier. +type Clientset struct { + testing.Fake + discovery *fakediscovery.FakeDiscovery + tracker testing.ObjectTracker +} + +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return c.discovery +} + +func (c *Clientset) Tracker() testing.ObjectTracker { + return c.tracker +} + +var ( + _ clientset.Interface = &Clientset{} + _ testing.FakeClient = &Clientset{} +) + +// BuildV1 retrieves the BuildV1Client +func (c *Clientset) BuildV1() buildv1.BuildV1Interface { + return &fakebuildv1.FakeBuildV1{Fake: &c.Fake} +} diff --git a/vendor/github.com/openshift/client-go/build/clientset/versioned/fake/doc.go b/vendor/github.com/openshift/client-go/build/clientset/versioned/fake/doc.go new file mode 100644 index 0000000000..3630ed1cd1 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/clientset/versioned/fake/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated fake clientset. 
+package fake diff --git a/vendor/github.com/openshift/client-go/build/clientset/versioned/fake/register.go b/vendor/github.com/openshift/client-go/build/clientset/versioned/fake/register.go new file mode 100644 index 0000000000..c120bd193a --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/clientset/versioned/fake/register.go @@ -0,0 +1,40 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + buildv1 "github.com/openshift/api/build/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var scheme = runtime.NewScheme() +var codecs = serializer.NewCodecFactory(scheme) + +var localSchemeBuilder = runtime.SchemeBuilder{ + buildv1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. 
+var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(scheme)) +} diff --git a/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/fake/doc.go b/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/fake/doc.go new file mode 100644 index 0000000000..2b5ba4c8e4 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/fake/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/fake/fake_build.go b/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/fake/fake_build.go new file mode 100644 index 0000000000..ea364d7eff --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/fake/fake_build.go @@ -0,0 +1,196 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + buildv1 "github.com/openshift/api/build/v1" + applyconfigurationsbuildv1 "github.com/openshift/client-go/build/applyconfigurations/build/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeBuilds implements BuildInterface +type FakeBuilds struct { + Fake *FakeBuildV1 + ns string +} + +var buildsResource = schema.GroupVersionResource{Group: "build.openshift.io", Version: "v1", Resource: "builds"} + +var buildsKind = schema.GroupVersionKind{Group: "build.openshift.io", Version: "v1", Kind: "Build"} + +// Get takes name of the build, and returns the corresponding build object, and an error if there is any. +func (c *FakeBuilds) Get(ctx context.Context, name string, options v1.GetOptions) (result *buildv1.Build, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(buildsResource, c.ns, name), &buildv1.Build{}) + + if obj == nil { + return nil, err + } + return obj.(*buildv1.Build), err +} + +// List takes label and field selectors, and returns the list of Builds that match those selectors. +func (c *FakeBuilds) List(ctx context.Context, opts v1.ListOptions) (result *buildv1.BuildList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewListAction(buildsResource, buildsKind, c.ns, opts), &buildv1.BuildList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &buildv1.BuildList{ListMeta: obj.(*buildv1.BuildList).ListMeta} + for _, item := range obj.(*buildv1.BuildList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested builds. +func (c *FakeBuilds) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(buildsResource, c.ns, opts)) + +} + +// Create takes the representation of a build and creates it. Returns the server's representation of the build, and an error, if there is any. +func (c *FakeBuilds) Create(ctx context.Context, build *buildv1.Build, opts v1.CreateOptions) (result *buildv1.Build, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(buildsResource, c.ns, build), &buildv1.Build{}) + + if obj == nil { + return nil, err + } + return obj.(*buildv1.Build), err +} + +// Update takes the representation of a build and updates it. Returns the server's representation of the build, and an error, if there is any. +func (c *FakeBuilds) Update(ctx context.Context, build *buildv1.Build, opts v1.UpdateOptions) (result *buildv1.Build, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(buildsResource, c.ns, build), &buildv1.Build{}) + + if obj == nil { + return nil, err + } + return obj.(*buildv1.Build), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeBuilds) UpdateStatus(ctx context.Context, build *buildv1.Build, opts v1.UpdateOptions) (*buildv1.Build, error) { + obj, err := c.Fake. 
+ Invokes(testing.NewUpdateSubresourceAction(buildsResource, "status", c.ns, build), &buildv1.Build{}) + + if obj == nil { + return nil, err + } + return obj.(*buildv1.Build), err +} + +// Delete takes name of the build and deletes it. Returns an error if one occurs. +func (c *FakeBuilds) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(buildsResource, c.ns, name, opts), &buildv1.Build{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeBuilds) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(buildsResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &buildv1.BuildList{}) + return err +} + +// Patch applies the patch and returns the patched build. +func (c *FakeBuilds) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *buildv1.Build, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(buildsResource, c.ns, name, pt, data, subresources...), &buildv1.Build{}) + + if obj == nil { + return nil, err + } + return obj.(*buildv1.Build), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied build. +func (c *FakeBuilds) Apply(ctx context.Context, build *applyconfigurationsbuildv1.BuildApplyConfiguration, opts v1.ApplyOptions) (result *buildv1.Build, err error) { + if build == nil { + return nil, fmt.Errorf("build provided to Apply must not be nil") + } + data, err := json.Marshal(build) + if err != nil { + return nil, err + } + name := build.Name + if name == nil { + return nil, fmt.Errorf("build.Name must be provided to Apply") + } + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(buildsResource, c.ns, *name, types.ApplyPatchType, data), &buildv1.Build{}) + + if obj == nil { + return nil, err + } + return obj.(*buildv1.Build), err +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *FakeBuilds) ApplyStatus(ctx context.Context, build *applyconfigurationsbuildv1.BuildApplyConfiguration, opts v1.ApplyOptions) (result *buildv1.Build, err error) { + if build == nil { + return nil, fmt.Errorf("build provided to Apply must not be nil") + } + data, err := json.Marshal(build) + if err != nil { + return nil, err + } + name := build.Name + if name == nil { + return nil, fmt.Errorf("build.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(buildsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &buildv1.Build{}) + + if obj == nil { + return nil, err + } + return obj.(*buildv1.Build), err +} + +// UpdateDetails takes the representation of a build and updates it. Returns the server's representation of the build, and an error, if there is any. +func (c *FakeBuilds) UpdateDetails(ctx context.Context, buildName string, build *buildv1.Build, opts v1.UpdateOptions) (result *buildv1.Build, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(buildsResource, "details", c.ns, build), &buildv1.Build{}) + + if obj == nil { + return nil, err + } + return obj.(*buildv1.Build), err +} + +// Clone takes the representation of a buildRequest and creates it. Returns the server's representation of the build, and an error, if there is any. +func (c *FakeBuilds) Clone(ctx context.Context, buildName string, buildRequest *buildv1.BuildRequest, opts v1.CreateOptions) (result *buildv1.Build, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewCreateSubresourceAction(buildsResource, buildName, "clone", c.ns, buildRequest), &buildv1.Build{}) + + if obj == nil { + return nil, err + } + return obj.(*buildv1.Build), err +} diff --git a/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/fake/fake_build_client.go b/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/fake/fake_build_client.go new file mode 100644 index 0000000000..31cdb947c4 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/fake/fake_build_client.go @@ -0,0 +1,28 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1 "github.com/openshift/client-go/build/clientset/versioned/typed/build/v1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeBuildV1 struct { + *testing.Fake +} + +func (c *FakeBuildV1) Builds(namespace string) v1.BuildInterface { + return &FakeBuilds{c, namespace} +} + +func (c *FakeBuildV1) BuildConfigs(namespace string) v1.BuildConfigInterface { + return &FakeBuildConfigs{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeBuildV1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/fake/fake_buildconfig.go b/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/fake/fake_buildconfig.go new file mode 100644 index 0000000000..44e79e3773 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/fake/fake_buildconfig.go @@ -0,0 +1,185 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + buildv1 "github.com/openshift/api/build/v1" + applyconfigurationsbuildv1 "github.com/openshift/client-go/build/applyconfigurations/build/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeBuildConfigs implements BuildConfigInterface +type FakeBuildConfigs struct { + Fake *FakeBuildV1 + ns string +} + +var buildconfigsResource = schema.GroupVersionResource{Group: "build.openshift.io", Version: "v1", Resource: "buildconfigs"} + +var buildconfigsKind = schema.GroupVersionKind{Group: "build.openshift.io", Version: "v1", Kind: "BuildConfig"} + +// Get takes name of the buildConfig, and returns the corresponding buildConfig object, and an error if there is any. +func (c *FakeBuildConfigs) Get(ctx context.Context, name string, options v1.GetOptions) (result *buildv1.BuildConfig, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(buildconfigsResource, c.ns, name), &buildv1.BuildConfig{}) + + if obj == nil { + return nil, err + } + return obj.(*buildv1.BuildConfig), err +} + +// List takes label and field selectors, and returns the list of BuildConfigs that match those selectors. +func (c *FakeBuildConfigs) List(ctx context.Context, opts v1.ListOptions) (result *buildv1.BuildConfigList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewListAction(buildconfigsResource, buildconfigsKind, c.ns, opts), &buildv1.BuildConfigList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &buildv1.BuildConfigList{ListMeta: obj.(*buildv1.BuildConfigList).ListMeta} + for _, item := range obj.(*buildv1.BuildConfigList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested buildConfigs. +func (c *FakeBuildConfigs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(buildconfigsResource, c.ns, opts)) + +} + +// Create takes the representation of a buildConfig and creates it. Returns the server's representation of the buildConfig, and an error, if there is any. +func (c *FakeBuildConfigs) Create(ctx context.Context, buildConfig *buildv1.BuildConfig, opts v1.CreateOptions) (result *buildv1.BuildConfig, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(buildconfigsResource, c.ns, buildConfig), &buildv1.BuildConfig{}) + + if obj == nil { + return nil, err + } + return obj.(*buildv1.BuildConfig), err +} + +// Update takes the representation of a buildConfig and updates it. Returns the server's representation of the buildConfig, and an error, if there is any. +func (c *FakeBuildConfigs) Update(ctx context.Context, buildConfig *buildv1.BuildConfig, opts v1.UpdateOptions) (result *buildv1.BuildConfig, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(buildconfigsResource, c.ns, buildConfig), &buildv1.BuildConfig{}) + + if obj == nil { + return nil, err + } + return obj.(*buildv1.BuildConfig), err +} + +// UpdateStatus was generated because the type contains a Status member. 
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeBuildConfigs) UpdateStatus(ctx context.Context, buildConfig *buildv1.BuildConfig, opts v1.UpdateOptions) (*buildv1.BuildConfig, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(buildconfigsResource, "status", c.ns, buildConfig), &buildv1.BuildConfig{}) + + if obj == nil { + return nil, err + } + return obj.(*buildv1.BuildConfig), err +} + +// Delete takes name of the buildConfig and deletes it. Returns an error if one occurs. +func (c *FakeBuildConfigs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(buildconfigsResource, c.ns, name, opts), &buildv1.BuildConfig{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeBuildConfigs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(buildconfigsResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &buildv1.BuildConfigList{}) + return err +} + +// Patch applies the patch and returns the patched buildConfig. +func (c *FakeBuildConfigs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *buildv1.BuildConfig, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(buildconfigsResource, c.ns, name, pt, data, subresources...), &buildv1.BuildConfig{}) + + if obj == nil { + return nil, err + } + return obj.(*buildv1.BuildConfig), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied buildConfig. 
+func (c *FakeBuildConfigs) Apply(ctx context.Context, buildConfig *applyconfigurationsbuildv1.BuildConfigApplyConfiguration, opts v1.ApplyOptions) (result *buildv1.BuildConfig, err error) { + if buildConfig == nil { + return nil, fmt.Errorf("buildConfig provided to Apply must not be nil") + } + data, err := json.Marshal(buildConfig) + if err != nil { + return nil, err + } + name := buildConfig.Name + if name == nil { + return nil, fmt.Errorf("buildConfig.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(buildconfigsResource, c.ns, *name, types.ApplyPatchType, data), &buildv1.BuildConfig{}) + + if obj == nil { + return nil, err + } + return obj.(*buildv1.BuildConfig), err +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *FakeBuildConfigs) ApplyStatus(ctx context.Context, buildConfig *applyconfigurationsbuildv1.BuildConfigApplyConfiguration, opts v1.ApplyOptions) (result *buildv1.BuildConfig, err error) { + if buildConfig == nil { + return nil, fmt.Errorf("buildConfig provided to Apply must not be nil") + } + data, err := json.Marshal(buildConfig) + if err != nil { + return nil, err + } + name := buildConfig.Name + if name == nil { + return nil, fmt.Errorf("buildConfig.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(buildconfigsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &buildv1.BuildConfig{}) + + if obj == nil { + return nil, err + } + return obj.(*buildv1.BuildConfig), err +} + +// Instantiate takes the representation of a buildRequest and creates it. Returns the server's representation of the build, and an error, if there is any. 
+func (c *FakeBuildConfigs) Instantiate(ctx context.Context, buildConfigName string, buildRequest *buildv1.BuildRequest, opts v1.CreateOptions) (result *buildv1.Build, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateSubresourceAction(buildconfigsResource, buildConfigName, "instantiate", c.ns, buildRequest), &buildv1.Build{}) + + if obj == nil { + return nil, err + } + return obj.(*buildv1.Build), err +} diff --git a/vendor/github.com/openshift/client-go/build/informers/externalversions/build/interface.go b/vendor/github.com/openshift/client-go/build/informers/externalversions/build/interface.go new file mode 100644 index 0000000000..01a651928a --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/informers/externalversions/build/interface.go @@ -0,0 +1,30 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package build + +import ( + v1 "github.com/openshift/client-go/build/informers/externalversions/build/v1" + internalinterfaces "github.com/openshift/client-go/build/informers/externalversions/internalinterfaces" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1 provides access to shared informers for resources in V1. + V1() v1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1 returns a new v1.Interface. 
+func (g *group) V1() v1.Interface { + return v1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/github.com/openshift/client-go/build/informers/externalversions/build/v1/build.go b/vendor/github.com/openshift/client-go/build/informers/externalversions/build/v1/build.go new file mode 100644 index 0000000000..2055ed96f5 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/informers/externalversions/build/v1/build.go @@ -0,0 +1,74 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + buildv1 "github.com/openshift/api/build/v1" + versioned "github.com/openshift/client-go/build/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/build/informers/externalversions/internalinterfaces" + v1 "github.com/openshift/client-go/build/listers/build/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// BuildInformer provides access to a shared informer and lister for +// Builds. +type BuildInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.BuildLister +} + +type buildInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewBuildInformer constructs a new informer for Build type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewBuildInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredBuildInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredBuildInformer constructs a new informer for Build type. 
+// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredBuildInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.BuildV1().Builds(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.BuildV1().Builds(namespace).Watch(context.TODO(), options) + }, + }, + &buildv1.Build{}, + resyncPeriod, + indexers, + ) +} + +func (f *buildInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredBuildInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *buildInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&buildv1.Build{}, f.defaultInformer) +} + +func (f *buildInformer) Lister() v1.BuildLister { + return v1.NewBuildLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/build/informers/externalversions/build/v1/buildconfig.go b/vendor/github.com/openshift/client-go/build/informers/externalversions/build/v1/buildconfig.go new file mode 100644 index 0000000000..28012f8c6d --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/informers/externalversions/build/v1/buildconfig.go @@ -0,0 +1,74 @@ +// Code generated by informer-gen. DO NOT EDIT. 
+ +package v1 + +import ( + "context" + time "time" + + buildv1 "github.com/openshift/api/build/v1" + versioned "github.com/openshift/client-go/build/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/build/informers/externalversions/internalinterfaces" + v1 "github.com/openshift/client-go/build/listers/build/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// BuildConfigInformer provides access to a shared informer and lister for +// BuildConfigs. +type BuildConfigInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.BuildConfigLister +} + +type buildConfigInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewBuildConfigInformer constructs a new informer for BuildConfig type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewBuildConfigInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredBuildConfigInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredBuildConfigInformer constructs a new informer for BuildConfig type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredBuildConfigInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.BuildV1().BuildConfigs(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.BuildV1().BuildConfigs(namespace).Watch(context.TODO(), options) + }, + }, + &buildv1.BuildConfig{}, + resyncPeriod, + indexers, + ) +} + +func (f *buildConfigInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredBuildConfigInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *buildConfigInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&buildv1.BuildConfig{}, f.defaultInformer) +} + +func (f *buildConfigInformer) Lister() v1.BuildConfigLister { + return v1.NewBuildConfigLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/build/informers/externalversions/build/v1/interface.go b/vendor/github.com/openshift/client-go/build/informers/externalversions/build/v1/interface.go new file mode 100644 index 0000000000..da69fc9bb6 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/informers/externalversions/build/v1/interface.go @@ -0,0 +1,36 @@ +// Code generated by informer-gen. DO NOT EDIT. 
+ +package v1 + +import ( + internalinterfaces "github.com/openshift/client-go/build/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // Builds returns a BuildInformer. + Builds() BuildInformer + // BuildConfigs returns a BuildConfigInformer. + BuildConfigs() BuildConfigInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// Builds returns a BuildInformer. +func (v *version) Builds() BuildInformer { + return &buildInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// BuildConfigs returns a BuildConfigInformer. +func (v *version) BuildConfigs() BuildConfigInformer { + return &buildConfigInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/github.com/openshift/client-go/build/informers/externalversions/factory.go b/vendor/github.com/openshift/client-go/build/informers/externalversions/factory.go new file mode 100644 index 0000000000..fadac908e0 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/informers/externalversions/factory.go @@ -0,0 +1,164 @@ +// Code generated by informer-gen. DO NOT EDIT. 
+ +package externalversions + +import ( + reflect "reflect" + sync "sync" + time "time" + + versioned "github.com/openshift/client-go/build/clientset/versioned" + build "github.com/openshift/client-go/build/informers/externalversions/build" + internalinterfaces "github.com/openshift/client-go/build/informers/externalversions/internalinterfaces" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// SharedInformerOption defines the functional option type for SharedInformerFactory. +type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory + +type sharedInformerFactory struct { + client versioned.Interface + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc + lock sync.Mutex + defaultResync time.Duration + customResync map[reflect.Type]time.Duration + + informers map[reflect.Type]cache.SharedIndexInformer + // startedInformers is used for tracking which informers have been started. + // This allows Start() to be called multiple times safely. + startedInformers map[reflect.Type]bool +} + +// WithCustomResyncConfig sets a custom resync period for the specified informer types. +func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + for k, v := range resyncConfig { + factory.customResync[reflect.TypeOf(k)] = v + } + return factory + } +} + +// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory. +func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.tweakListOptions = tweakListOptions + return factory + } +} + +// WithNamespace limits the SharedInformerFactory to the specified namespace. 
+func WithNamespace(namespace string) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.namespace = namespace + return factory + } +} + +// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. +func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync) +} + +// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory. +// Listers obtained via this SharedInformerFactory will be subject to the same filters +// as specified here. +// Deprecated: Please use NewSharedInformerFactoryWithOptions instead +func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions)) +} + +// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options. +func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory { + factory := &sharedInformerFactory{ + client: client, + namespace: v1.NamespaceAll, + defaultResync: defaultResync, + informers: make(map[reflect.Type]cache.SharedIndexInformer), + startedInformers: make(map[reflect.Type]bool), + customResync: make(map[reflect.Type]time.Duration), + } + + // Apply all options + for _, opt := range options { + factory = opt(factory) + } + + return factory +} + +// Start initializes all requested informers. 
+func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) { + f.lock.Lock() + defer f.lock.Unlock() + + for informerType, informer := range f.informers { + if !f.startedInformers[informerType] { + go informer.Run(stopCh) + f.startedInformers[informerType] = true + } + } +} + +// WaitForCacheSync waits for all started informers' cache were synced. +func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool { + informers := func() map[reflect.Type]cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informers := map[reflect.Type]cache.SharedIndexInformer{} + for informerType, informer := range f.informers { + if f.startedInformers[informerType] { + informers[informerType] = informer + } + } + return informers + }() + + res := map[reflect.Type]bool{} + for informType, informer := range informers { + res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced) + } + return res +} + +// InternalInformerFor returns the SharedIndexInformer for obj using an internal +// client. +func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informerType := reflect.TypeOf(obj) + informer, exists := f.informers[informerType] + if exists { + return informer + } + + resyncPeriod, exists := f.customResync[informerType] + if !exists { + resyncPeriod = f.defaultResync + } + + informer = newFunc(f.client, resyncPeriod) + f.informers[informerType] = informer + + return informer +} + +// SharedInformerFactory provides shared informers for resources in all known +// API group versions. 
+type SharedInformerFactory interface { + internalinterfaces.SharedInformerFactory + ForResource(resource schema.GroupVersionResource) (GenericInformer, error) + WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool + + Build() build.Interface +} + +func (f *sharedInformerFactory) Build() build.Interface { + return build.New(f, f.namespace, f.tweakListOptions) +} diff --git a/vendor/github.com/openshift/client-go/build/informers/externalversions/generic.go b/vendor/github.com/openshift/client-go/build/informers/externalversions/generic.go new file mode 100644 index 0000000000..e8b2035b70 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/informers/externalversions/generic.go @@ -0,0 +1,48 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package externalversions + +import ( + "fmt" + + v1 "github.com/openshift/api/build/v1" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// GenericInformer is type of SharedIndexInformer which will locate and delegate to other +// sharedInformers based on type +type GenericInformer interface { + Informer() cache.SharedIndexInformer + Lister() cache.GenericLister +} + +type genericInformer struct { + informer cache.SharedIndexInformer + resource schema.GroupResource +} + +// Informer returns the SharedIndexInformer. +func (f *genericInformer) Informer() cache.SharedIndexInformer { + return f.informer +} + +// Lister returns the GenericLister. 
+func (f *genericInformer) Lister() cache.GenericLister { + return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource) +} + +// ForResource gives generic access to a shared informer of the matching type +// TODO extend this to unknown resources with a client pool +func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { + switch resource { + // Group=build.openshift.io, Version=v1 + case v1.SchemeGroupVersion.WithResource("builds"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Build().V1().Builds().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("buildconfigs"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Build().V1().BuildConfigs().Informer()}, nil + + } + + return nil, fmt.Errorf("no informer found for %v", resource) +} diff --git a/vendor/github.com/openshift/client-go/build/informers/externalversions/internalinterfaces/factory_interfaces.go b/vendor/github.com/openshift/client-go/build/informers/externalversions/internalinterfaces/factory_interfaces.go new file mode 100644 index 0000000000..1bcbd5975a --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -0,0 +1,24 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package internalinterfaces + +import ( + time "time" + + versioned "github.com/openshift/client-go/build/clientset/versioned" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + cache "k8s.io/client-go/tools/cache" +) + +// NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer. 
+type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer + +// SharedInformerFactory a small interface to allow for adding an informer without an import cycle +type SharedInformerFactory interface { + Start(stopCh <-chan struct{}) + InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer +} + +// TweakListOptionsFunc is a function that transforms a v1.ListOptions. +type TweakListOptionsFunc func(*v1.ListOptions) diff --git a/vendor/github.com/openshift/client-go/build/listers/build/v1/build.go b/vendor/github.com/openshift/client-go/build/listers/build/v1/build.go new file mode 100644 index 0000000000..e072f9bac8 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/listers/build/v1/build.go @@ -0,0 +1,83 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/api/build/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// BuildLister helps list Builds. +// All objects returned here must be treated as read-only. +type BuildLister interface { + // List lists all Builds in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.Build, err error) + // Builds returns an object that can list and get Builds. + Builds(namespace string) BuildNamespaceLister + BuildListerExpansion +} + +// buildLister implements the BuildLister interface. +type buildLister struct { + indexer cache.Indexer +} + +// NewBuildLister returns a new BuildLister. +func NewBuildLister(indexer cache.Indexer) BuildLister { + return &buildLister{indexer: indexer} +} + +// List lists all Builds in the indexer. 
+func (s *buildLister) List(selector labels.Selector) (ret []*v1.Build, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Build)) + }) + return ret, err +} + +// Builds returns an object that can list and get Builds. +func (s *buildLister) Builds(namespace string) BuildNamespaceLister { + return buildNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// BuildNamespaceLister helps list and get Builds. +// All objects returned here must be treated as read-only. +type BuildNamespaceLister interface { + // List lists all Builds in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.Build, err error) + // Get retrieves the Build from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.Build, error) + BuildNamespaceListerExpansion +} + +// buildNamespaceLister implements the BuildNamespaceLister +// interface. +type buildNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Builds in the indexer for a given namespace. +func (s buildNamespaceLister) List(selector labels.Selector) (ret []*v1.Build, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Build)) + }) + return ret, err +} + +// Get retrieves the Build from the indexer for a given namespace and name. 
+func (s buildNamespaceLister) Get(name string) (*v1.Build, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("build"), name) + } + return obj.(*v1.Build), nil +} diff --git a/vendor/github.com/openshift/client-go/build/listers/build/v1/buildconfig.go b/vendor/github.com/openshift/client-go/build/listers/build/v1/buildconfig.go new file mode 100644 index 0000000000..d2bbdb4ec6 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/listers/build/v1/buildconfig.go @@ -0,0 +1,83 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/api/build/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// BuildConfigLister helps list BuildConfigs. +// All objects returned here must be treated as read-only. +type BuildConfigLister interface { + // List lists all BuildConfigs in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.BuildConfig, err error) + // BuildConfigs returns an object that can list and get BuildConfigs. + BuildConfigs(namespace string) BuildConfigNamespaceLister + BuildConfigListerExpansion +} + +// buildConfigLister implements the BuildConfigLister interface. +type buildConfigLister struct { + indexer cache.Indexer +} + +// NewBuildConfigLister returns a new BuildConfigLister. +func NewBuildConfigLister(indexer cache.Indexer) BuildConfigLister { + return &buildConfigLister{indexer: indexer} +} + +// List lists all BuildConfigs in the indexer. +func (s *buildConfigLister) List(selector labels.Selector) (ret []*v1.BuildConfig, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.BuildConfig)) + }) + return ret, err +} + +// BuildConfigs returns an object that can list and get BuildConfigs. 
+func (s *buildConfigLister) BuildConfigs(namespace string) BuildConfigNamespaceLister { + return buildConfigNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// BuildConfigNamespaceLister helps list and get BuildConfigs. +// All objects returned here must be treated as read-only. +type BuildConfigNamespaceLister interface { + // List lists all BuildConfigs in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.BuildConfig, err error) + // Get retrieves the BuildConfig from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.BuildConfig, error) + BuildConfigNamespaceListerExpansion +} + +// buildConfigNamespaceLister implements the BuildConfigNamespaceLister +// interface. +type buildConfigNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all BuildConfigs in the indexer for a given namespace. +func (s buildConfigNamespaceLister) List(selector labels.Selector) (ret []*v1.BuildConfig, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.BuildConfig)) + }) + return ret, err +} + +// Get retrieves the BuildConfig from the indexer for a given namespace and name. 
+func (s buildConfigNamespaceLister) Get(name string) (*v1.BuildConfig, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("buildconfig"), name) + } + return obj.(*v1.BuildConfig), nil +} diff --git a/vendor/github.com/openshift/client-go/build/listers/build/v1/expansion_generated.go b/vendor/github.com/openshift/client-go/build/listers/build/v1/expansion_generated.go new file mode 100644 index 0000000000..1fc9faecdd --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/listers/build/v1/expansion_generated.go @@ -0,0 +1,19 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +// BuildListerExpansion allows custom methods to be added to +// BuildLister. +type BuildListerExpansion interface{} + +// BuildNamespaceListerExpansion allows custom methods to be added to +// BuildNamespaceLister. +type BuildNamespaceListerExpansion interface{} + +// BuildConfigListerExpansion allows custom methods to be added to +// BuildConfigLister. +type BuildConfigListerExpansion interface{} + +// BuildConfigNamespaceListerExpansion allows custom methods to be added to +// BuildConfigNamespaceLister. +type BuildConfigNamespaceListerExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/clientset.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/clientset.go new file mode 100644 index 0000000000..b0ebcebf51 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/clientset.go @@ -0,0 +1,105 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package versioned + +import ( + "fmt" + "net/http" + + imagev1 "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1" + discovery "k8s.io/client-go/discovery" + rest "k8s.io/client-go/rest" + flowcontrol "k8s.io/client-go/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + ImageV1() imagev1.ImageV1Interface +} + +// Clientset contains the clients for groups. Each group has exactly one +// version included in a Clientset. +type Clientset struct { + *discovery.DiscoveryClient + imageV1 *imagev1.ImageV1Client +} + +// ImageV1 retrieves the ImageV1Client +func (c *Clientset) ImageV1() imagev1.ImageV1Interface { + return c.imageV1 +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + if c == nil { + return nil + } + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfig will generate a rate-limiter in configShallowCopy. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*Clientset, error) { + configShallowCopy := *c + + if configShallowCopy.UserAgent == "" { + configShallowCopy.UserAgent = rest.DefaultKubernetesUserAgent() + } + + // share the transport between all clients + httpClient, err := rest.HTTPClientFor(&configShallowCopy) + if err != nil { + return nil, err + } + + return NewForConfigAndClient(&configShallowCopy, httpClient) +} + +// NewForConfigAndClient creates a new Clientset for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfigAndClient will generate a rate-limiter in configShallowCopy. 
+func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + if configShallowCopy.Burst <= 0 { + return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") + } + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + + var cs Clientset + var err error + cs.imageV1, err = imagev1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + return &cs, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *Clientset { + cs, err := NewForConfig(c) + if err != nil { + panic(err) + } + return cs +} + +// New creates a new Clientset for the given RESTClient. +func New(c rest.Interface) *Clientset { + var cs Clientset + cs.imageV1 = imagev1.New(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &cs +} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/doc.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/doc.go new file mode 100644 index 0000000000..0e0c2a8900 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated clientset. 
+package versioned diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/fake/clientset_generated.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/fake/clientset_generated.go new file mode 100644 index 0000000000..dfb57b4e10 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/fake/clientset_generated.go @@ -0,0 +1,69 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + clientset "github.com/openshift/client-go/image/clientset/versioned" + imagev1 "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1" + fakeimagev1 "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/discovery" + fakediscovery "k8s.io/client-go/discovery/fake" + "k8s.io/client-go/testing" +) + +// NewSimpleClientset returns a clientset that will respond with the provided objects. +// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any validations and/or defaults. It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. +func NewSimpleClientset(objects ...runtime.Object) *Clientset { + o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + cs := &Clientset{tracker: o} + cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} + cs.AddReactor("*", "*", testing.ObjectReaction(o)) + cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) + + return cs +} + +// Clientset implements clientset.Interface. 
Meant to be embedded into a +// struct to get a default implementation. This makes faking out just the method +// you want to test easier. +type Clientset struct { + testing.Fake + discovery *fakediscovery.FakeDiscovery + tracker testing.ObjectTracker +} + +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return c.discovery +} + +func (c *Clientset) Tracker() testing.ObjectTracker { + return c.tracker +} + +var ( + _ clientset.Interface = &Clientset{} + _ testing.FakeClient = &Clientset{} +) + +// ImageV1 retrieves the ImageV1Client +func (c *Clientset) ImageV1() imagev1.ImageV1Interface { + return &fakeimagev1.FakeImageV1{Fake: &c.Fake} +} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/fake/doc.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/fake/doc.go new file mode 100644 index 0000000000..3630ed1cd1 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/fake/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated fake clientset. +package fake diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/fake/register.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/fake/register.go new file mode 100644 index 0000000000..d7efdf27ee --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/fake/register.go @@ -0,0 +1,40 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + imagev1 "github.com/openshift/api/image/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var scheme = runtime.NewScheme() +var codecs = serializer.NewCodecFactory(scheme) + +var localSchemeBuilder = runtime.SchemeBuilder{ + imagev1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(scheme)) +} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/doc.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/doc.go new file mode 100644 index 0000000000..2b5ba4c8e4 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. 
+package fake diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_image.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_image.go new file mode 100644 index 0000000000..c32387af93 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_image.go @@ -0,0 +1,130 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + imagev1 "github.com/openshift/api/image/v1" + applyconfigurationsimagev1 "github.com/openshift/client-go/image/applyconfigurations/image/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeImages implements ImageInterface +type FakeImages struct { + Fake *FakeImageV1 +} + +var imagesResource = schema.GroupVersionResource{Group: "image.openshift.io", Version: "v1", Resource: "images"} + +var imagesKind = schema.GroupVersionKind{Group: "image.openshift.io", Version: "v1", Kind: "Image"} + +// Get takes name of the image, and returns the corresponding image object, and an error if there is any. +func (c *FakeImages) Get(ctx context.Context, name string, options v1.GetOptions) (result *imagev1.Image, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(imagesResource, name), &imagev1.Image{}) + if obj == nil { + return nil, err + } + return obj.(*imagev1.Image), err +} + +// List takes label and field selectors, and returns the list of Images that match those selectors. +func (c *FakeImages) List(ctx context.Context, opts v1.ListOptions) (result *imagev1.ImageList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootListAction(imagesResource, imagesKind, opts), &imagev1.ImageList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &imagev1.ImageList{ListMeta: obj.(*imagev1.ImageList).ListMeta} + for _, item := range obj.(*imagev1.ImageList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested images. +func (c *FakeImages) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(imagesResource, opts)) +} + +// Create takes the representation of a image and creates it. Returns the server's representation of the image, and an error, if there is any. +func (c *FakeImages) Create(ctx context.Context, image *imagev1.Image, opts v1.CreateOptions) (result *imagev1.Image, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(imagesResource, image), &imagev1.Image{}) + if obj == nil { + return nil, err + } + return obj.(*imagev1.Image), err +} + +// Update takes the representation of a image and updates it. Returns the server's representation of the image, and an error, if there is any. +func (c *FakeImages) Update(ctx context.Context, image *imagev1.Image, opts v1.UpdateOptions) (result *imagev1.Image, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(imagesResource, image), &imagev1.Image{}) + if obj == nil { + return nil, err + } + return obj.(*imagev1.Image), err +} + +// Delete takes name of the image and deletes it. Returns an error if one occurs. +func (c *FakeImages) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. 
+ Invokes(testing.NewRootDeleteActionWithOptions(imagesResource, name, opts), &imagev1.Image{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeImages) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(imagesResource, listOpts) + + _, err := c.Fake.Invokes(action, &imagev1.ImageList{}) + return err +} + +// Patch applies the patch and returns the patched image. +func (c *FakeImages) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *imagev1.Image, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(imagesResource, name, pt, data, subresources...), &imagev1.Image{}) + if obj == nil { + return nil, err + } + return obj.(*imagev1.Image), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied image. +func (c *FakeImages) Apply(ctx context.Context, image *applyconfigurationsimagev1.ImageApplyConfiguration, opts v1.ApplyOptions) (result *imagev1.Image, err error) { + if image == nil { + return nil, fmt.Errorf("image provided to Apply must not be nil") + } + data, err := json.Marshal(image) + if err != nil { + return nil, err + } + name := image.Name + if name == nil { + return nil, fmt.Errorf("image.Name must be provided to Apply") + } + obj, err := c.Fake. 
+ Invokes(testing.NewRootPatchSubresourceAction(imagesResource, *name, types.ApplyPatchType, data), &imagev1.Image{}) + if obj == nil { + return nil, err + } + return obj.(*imagev1.Image), err +} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_image_client.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_image_client.go new file mode 100644 index 0000000000..c135a79bb6 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_image_client.go @@ -0,0 +1,52 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1 "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeImageV1 struct { + *testing.Fake +} + +func (c *FakeImageV1) Images() v1.ImageInterface { + return &FakeImages{c} +} + +func (c *FakeImageV1) ImageSignatures() v1.ImageSignatureInterface { + return &FakeImageSignatures{c} +} + +func (c *FakeImageV1) ImageStreams(namespace string) v1.ImageStreamInterface { + return &FakeImageStreams{c, namespace} +} + +func (c *FakeImageV1) ImageStreamImages(namespace string) v1.ImageStreamImageInterface { + return &FakeImageStreamImages{c, namespace} +} + +func (c *FakeImageV1) ImageStreamImports(namespace string) v1.ImageStreamImportInterface { + return &FakeImageStreamImports{c, namespace} +} + +func (c *FakeImageV1) ImageStreamMappings(namespace string) v1.ImageStreamMappingInterface { + return &FakeImageStreamMappings{c, namespace} +} + +func (c *FakeImageV1) ImageStreamTags(namespace string) v1.ImageStreamTagInterface { + return &FakeImageStreamTags{c, namespace} +} + +func (c *FakeImageV1) ImageTags(namespace string) v1.ImageTagInterface { + return &FakeImageTags{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client 
implementation. +func (c *FakeImageV1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagesignature.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagesignature.go new file mode 100644 index 0000000000..0ff22e2fd2 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagesignature.go @@ -0,0 +1,38 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v1 "github.com/openshift/api/image/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + schema "k8s.io/apimachinery/pkg/runtime/schema" + testing "k8s.io/client-go/testing" +) + +// FakeImageSignatures implements ImageSignatureInterface +type FakeImageSignatures struct { + Fake *FakeImageV1 +} + +var imagesignaturesResource = schema.GroupVersionResource{Group: "image.openshift.io", Version: "v1", Resource: "imagesignatures"} + +var imagesignaturesKind = schema.GroupVersionKind{Group: "image.openshift.io", Version: "v1", Kind: "ImageSignature"} + +// Create takes the representation of a imageSignature and creates it. Returns the server's representation of the imageSignature, and an error, if there is any. +func (c *FakeImageSignatures) Create(ctx context.Context, imageSignature *v1.ImageSignature, opts metav1.CreateOptions) (result *v1.ImageSignature, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(imagesignaturesResource, imageSignature), &v1.ImageSignature{}) + if obj == nil { + return nil, err + } + return obj.(*v1.ImageSignature), err +} + +// Delete takes name of the imageSignature and deletes it. Returns an error if one occurs. +func (c *FakeImageSignatures) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + _, err := c.Fake. 
+ Invokes(testing.NewRootDeleteActionWithOptions(imagesignaturesResource, name, opts), &v1.ImageSignature{}) + return err +} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestream.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestream.go new file mode 100644 index 0000000000..7db6c8a822 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestream.go @@ -0,0 +1,196 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + imagev1 "github.com/openshift/api/image/v1" + applyconfigurationsimagev1 "github.com/openshift/client-go/image/applyconfigurations/image/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeImageStreams implements ImageStreamInterface +type FakeImageStreams struct { + Fake *FakeImageV1 + ns string +} + +var imagestreamsResource = schema.GroupVersionResource{Group: "image.openshift.io", Version: "v1", Resource: "imagestreams"} + +var imagestreamsKind = schema.GroupVersionKind{Group: "image.openshift.io", Version: "v1", Kind: "ImageStream"} + +// Get takes name of the imageStream, and returns the corresponding imageStream object, and an error if there is any. +func (c *FakeImageStreams) Get(ctx context.Context, name string, options v1.GetOptions) (result *imagev1.ImageStream, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(imagestreamsResource, c.ns, name), &imagev1.ImageStream{}) + + if obj == nil { + return nil, err + } + return obj.(*imagev1.ImageStream), err +} + +// List takes label and field selectors, and returns the list of ImageStreams that match those selectors. 
+func (c *FakeImageStreams) List(ctx context.Context, opts v1.ListOptions) (result *imagev1.ImageStreamList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(imagestreamsResource, imagestreamsKind, c.ns, opts), &imagev1.ImageStreamList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &imagev1.ImageStreamList{ListMeta: obj.(*imagev1.ImageStreamList).ListMeta} + for _, item := range obj.(*imagev1.ImageStreamList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested imageStreams. +func (c *FakeImageStreams) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(imagestreamsResource, c.ns, opts)) + +} + +// Create takes the representation of a imageStream and creates it. Returns the server's representation of the imageStream, and an error, if there is any. +func (c *FakeImageStreams) Create(ctx context.Context, imageStream *imagev1.ImageStream, opts v1.CreateOptions) (result *imagev1.ImageStream, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(imagestreamsResource, c.ns, imageStream), &imagev1.ImageStream{}) + + if obj == nil { + return nil, err + } + return obj.(*imagev1.ImageStream), err +} + +// Update takes the representation of a imageStream and updates it. Returns the server's representation of the imageStream, and an error, if there is any. +func (c *FakeImageStreams) Update(ctx context.Context, imageStream *imagev1.ImageStream, opts v1.UpdateOptions) (result *imagev1.ImageStream, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewUpdateAction(imagestreamsResource, c.ns, imageStream), &imagev1.ImageStream{}) + + if obj == nil { + return nil, err + } + return obj.(*imagev1.ImageStream), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeImageStreams) UpdateStatus(ctx context.Context, imageStream *imagev1.ImageStream, opts v1.UpdateOptions) (*imagev1.ImageStream, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(imagestreamsResource, "status", c.ns, imageStream), &imagev1.ImageStream{}) + + if obj == nil { + return nil, err + } + return obj.(*imagev1.ImageStream), err +} + +// Delete takes name of the imageStream and deletes it. Returns an error if one occurs. +func (c *FakeImageStreams) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(imagestreamsResource, c.ns, name, opts), &imagev1.ImageStream{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeImageStreams) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(imagestreamsResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &imagev1.ImageStreamList{}) + return err +} + +// Patch applies the patch and returns the patched imageStream. +func (c *FakeImageStreams) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *imagev1.ImageStream, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(imagestreamsResource, c.ns, name, pt, data, subresources...), &imagev1.ImageStream{}) + + if obj == nil { + return nil, err + } + return obj.(*imagev1.ImageStream), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied imageStream. +func (c *FakeImageStreams) Apply(ctx context.Context, imageStream *applyconfigurationsimagev1.ImageStreamApplyConfiguration, opts v1.ApplyOptions) (result *imagev1.ImageStream, err error) { + if imageStream == nil { + return nil, fmt.Errorf("imageStream provided to Apply must not be nil") + } + data, err := json.Marshal(imageStream) + if err != nil { + return nil, err + } + name := imageStream.Name + if name == nil { + return nil, fmt.Errorf("imageStream.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(imagestreamsResource, c.ns, *name, types.ApplyPatchType, data), &imagev1.ImageStream{}) + + if obj == nil { + return nil, err + } + return obj.(*imagev1.ImageStream), err +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *FakeImageStreams) ApplyStatus(ctx context.Context, imageStream *applyconfigurationsimagev1.ImageStreamApplyConfiguration, opts v1.ApplyOptions) (result *imagev1.ImageStream, err error) { + if imageStream == nil { + return nil, fmt.Errorf("imageStream provided to Apply must not be nil") + } + data, err := json.Marshal(imageStream) + if err != nil { + return nil, err + } + name := imageStream.Name + if name == nil { + return nil, fmt.Errorf("imageStream.Name must be provided to Apply") + } + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(imagestreamsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &imagev1.ImageStream{}) + + if obj == nil { + return nil, err + } + return obj.(*imagev1.ImageStream), err +} + +// Secrets takes name of the imageStream, and returns the corresponding secretList object, and an error if there is any. +func (c *FakeImageStreams) Secrets(ctx context.Context, imageStreamName string, options v1.GetOptions) (result *imagev1.SecretList, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetSubresourceAction(imagestreamsResource, c.ns, "secrets", imageStreamName), &imagev1.SecretList{}) + + if obj == nil { + return nil, err + } + return obj.(*imagev1.SecretList), err +} + +// Layers takes name of the imageStream, and returns the corresponding imageStreamLayers object, and an error if there is any. +func (c *FakeImageStreams) Layers(ctx context.Context, imageStreamName string, options v1.GetOptions) (result *imagev1.ImageStreamLayers, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetSubresourceAction(imagestreamsResource, c.ns, "layers", imageStreamName), &imagev1.ImageStreamLayers{}) + + if obj == nil { + return nil, err + } + return obj.(*imagev1.ImageStreamLayers), err +} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestreamimage.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestreamimage.go new file mode 100644 index 0000000000..aa97914259 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestreamimage.go @@ -0,0 +1,33 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + + imagev1 "github.com/openshift/api/image/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + schema "k8s.io/apimachinery/pkg/runtime/schema" + testing "k8s.io/client-go/testing" +) + +// FakeImageStreamImages implements ImageStreamImageInterface +type FakeImageStreamImages struct { + Fake *FakeImageV1 + ns string +} + +var imagestreamimagesResource = schema.GroupVersionResource{Group: "image.openshift.io", Version: "v1", Resource: "imagestreamimages"} + +var imagestreamimagesKind = schema.GroupVersionKind{Group: "image.openshift.io", Version: "v1", Kind: "ImageStreamImage"} + +// Get takes name of the imageStreamImage, and returns the corresponding imageStreamImage object, and an error if there is any. +func (c *FakeImageStreamImages) Get(ctx context.Context, name string, options v1.GetOptions) (result *imagev1.ImageStreamImage, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(imagestreamimagesResource, c.ns, name), &imagev1.ImageStreamImage{}) + + if obj == nil { + return nil, err + } + return obj.(*imagev1.ImageStreamImage), err +} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestreamimport.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestreamimport.go new file mode 100644 index 0000000000..5e7e5da326 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestreamimport.go @@ -0,0 +1,33 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + + v1 "github.com/openshift/api/image/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + schema "k8s.io/apimachinery/pkg/runtime/schema" + testing "k8s.io/client-go/testing" +) + +// FakeImageStreamImports implements ImageStreamImportInterface +type FakeImageStreamImports struct { + Fake *FakeImageV1 + ns string +} + +var imagestreamimportsResource = schema.GroupVersionResource{Group: "image.openshift.io", Version: "v1", Resource: "imagestreamimports"} + +var imagestreamimportsKind = schema.GroupVersionKind{Group: "image.openshift.io", Version: "v1", Kind: "ImageStreamImport"} + +// Create takes the representation of a imageStreamImport and creates it. Returns the server's representation of the imageStreamImport, and an error, if there is any. +func (c *FakeImageStreamImports) Create(ctx context.Context, imageStreamImport *v1.ImageStreamImport, opts metav1.CreateOptions) (result *v1.ImageStreamImport, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(imagestreamimportsResource, c.ns, imageStreamImport), &v1.ImageStreamImport{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.ImageStreamImport), err +} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestreammapping.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestreammapping.go new file mode 100644 index 0000000000..d50ddbd1ed --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestreammapping.go @@ -0,0 +1,59 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + imagev1 "github.com/openshift/api/image/v1" + v1 "github.com/openshift/client-go/image/applyconfigurations/image/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + testing "k8s.io/client-go/testing" +) + +// FakeImageStreamMappings implements ImageStreamMappingInterface +type FakeImageStreamMappings struct { + Fake *FakeImageV1 + ns string +} + +var imagestreammappingsResource = schema.GroupVersionResource{Group: "image.openshift.io", Version: "v1", Resource: "imagestreammappings"} + +var imagestreammappingsKind = schema.GroupVersionKind{Group: "image.openshift.io", Version: "v1", Kind: "ImageStreamMapping"} + +// Apply takes the given apply declarative configuration, applies it and returns the applied imageStreamMapping. +func (c *FakeImageStreamMappings) Apply(ctx context.Context, imageStreamMapping *v1.ImageStreamMappingApplyConfiguration, opts metav1.ApplyOptions) (result *imagev1.ImageStreamMapping, err error) { + if imageStreamMapping == nil { + return nil, fmt.Errorf("imageStreamMapping provided to Apply must not be nil") + } + data, err := json.Marshal(imageStreamMapping) + if err != nil { + return nil, err + } + name := imageStreamMapping.Name + if name == nil { + return nil, fmt.Errorf("imageStreamMapping.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(imagestreammappingsResource, c.ns, *name, types.ApplyPatchType, data), &imagev1.ImageStreamMapping{}) + + if obj == nil { + return nil, err + } + return obj.(*imagev1.ImageStreamMapping), err +} + +// Create takes the representation of a imageStreamMapping and creates it. Returns the server's representation of the status, and an error, if there is any. 
+func (c *FakeImageStreamMappings) Create(ctx context.Context, imageStreamMapping *imagev1.ImageStreamMapping, opts metav1.CreateOptions) (result *metav1.Status, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(imagestreammappingsResource, c.ns, imageStreamMapping), &metav1.Status{}) + + if obj == nil { + return nil, err + } + return obj.(*metav1.Status), err +} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestreamtag.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestreamtag.go new file mode 100644 index 0000000000..0befdecac1 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestreamtag.go @@ -0,0 +1,86 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + imagev1 "github.com/openshift/api/image/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + testing "k8s.io/client-go/testing" +) + +// FakeImageStreamTags implements ImageStreamTagInterface +type FakeImageStreamTags struct { + Fake *FakeImageV1 + ns string +} + +var imagestreamtagsResource = schema.GroupVersionResource{Group: "image.openshift.io", Version: "v1", Resource: "imagestreamtags"} + +var imagestreamtagsKind = schema.GroupVersionKind{Group: "image.openshift.io", Version: "v1", Kind: "ImageStreamTag"} + +// Get takes name of the imageStreamTag, and returns the corresponding imageStreamTag object, and an error if there is any. +func (c *FakeImageStreamTags) Get(ctx context.Context, name string, options v1.GetOptions) (result *imagev1.ImageStreamTag, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewGetAction(imagestreamtagsResource, c.ns, name), &imagev1.ImageStreamTag{}) + + if obj == nil { + return nil, err + } + return obj.(*imagev1.ImageStreamTag), err +} + +// List takes label and field selectors, and returns the list of ImageStreamTags that match those selectors. +func (c *FakeImageStreamTags) List(ctx context.Context, opts v1.ListOptions) (result *imagev1.ImageStreamTagList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(imagestreamtagsResource, imagestreamtagsKind, c.ns, opts), &imagev1.ImageStreamTagList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &imagev1.ImageStreamTagList{ListMeta: obj.(*imagev1.ImageStreamTagList).ListMeta} + for _, item := range obj.(*imagev1.ImageStreamTagList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Create takes the representation of a imageStreamTag and creates it. Returns the server's representation of the imageStreamTag, and an error, if there is any. +func (c *FakeImageStreamTags) Create(ctx context.Context, imageStreamTag *imagev1.ImageStreamTag, opts v1.CreateOptions) (result *imagev1.ImageStreamTag, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(imagestreamtagsResource, c.ns, imageStreamTag), &imagev1.ImageStreamTag{}) + + if obj == nil { + return nil, err + } + return obj.(*imagev1.ImageStreamTag), err +} + +// Update takes the representation of a imageStreamTag and updates it. Returns the server's representation of the imageStreamTag, and an error, if there is any. +func (c *FakeImageStreamTags) Update(ctx context.Context, imageStreamTag *imagev1.ImageStreamTag, opts v1.UpdateOptions) (result *imagev1.ImageStreamTag, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewUpdateAction(imagestreamtagsResource, c.ns, imageStreamTag), &imagev1.ImageStreamTag{}) + + if obj == nil { + return nil, err + } + return obj.(*imagev1.ImageStreamTag), err +} + +// Delete takes name of the imageStreamTag and deletes it. Returns an error if one occurs. +func (c *FakeImageStreamTags) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(imagestreamtagsResource, c.ns, name, opts), &imagev1.ImageStreamTag{}) + + return err +} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagetag.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagetag.go new file mode 100644 index 0000000000..6bf41d7d97 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagetag.go @@ -0,0 +1,86 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + imagev1 "github.com/openshift/api/image/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + testing "k8s.io/client-go/testing" +) + +// FakeImageTags implements ImageTagInterface +type FakeImageTags struct { + Fake *FakeImageV1 + ns string +} + +var imagetagsResource = schema.GroupVersionResource{Group: "image.openshift.io", Version: "v1", Resource: "imagetags"} + +var imagetagsKind = schema.GroupVersionKind{Group: "image.openshift.io", Version: "v1", Kind: "ImageTag"} + +// Get takes name of the imageTag, and returns the corresponding imageTag object, and an error if there is any. +func (c *FakeImageTags) Get(ctx context.Context, name string, options v1.GetOptions) (result *imagev1.ImageTag, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewGetAction(imagetagsResource, c.ns, name), &imagev1.ImageTag{}) + + if obj == nil { + return nil, err + } + return obj.(*imagev1.ImageTag), err +} + +// List takes label and field selectors, and returns the list of ImageTags that match those selectors. +func (c *FakeImageTags) List(ctx context.Context, opts v1.ListOptions) (result *imagev1.ImageTagList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(imagetagsResource, imagetagsKind, c.ns, opts), &imagev1.ImageTagList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &imagev1.ImageTagList{ListMeta: obj.(*imagev1.ImageTagList).ListMeta} + for _, item := range obj.(*imagev1.ImageTagList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Create takes the representation of a imageTag and creates it. Returns the server's representation of the imageTag, and an error, if there is any. +func (c *FakeImageTags) Create(ctx context.Context, imageTag *imagev1.ImageTag, opts v1.CreateOptions) (result *imagev1.ImageTag, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(imagetagsResource, c.ns, imageTag), &imagev1.ImageTag{}) + + if obj == nil { + return nil, err + } + return obj.(*imagev1.ImageTag), err +} + +// Update takes the representation of a imageTag and updates it. Returns the server's representation of the imageTag, and an error, if there is any. +func (c *FakeImageTags) Update(ctx context.Context, imageTag *imagev1.ImageTag, opts v1.UpdateOptions) (result *imagev1.ImageTag, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(imagetagsResource, c.ns, imageTag), &imagev1.ImageTag{}) + + if obj == nil { + return nil, err + } + return obj.(*imagev1.ImageTag), err +} + +// Delete takes name of the imageTag and deletes it. Returns an error if one occurs. 
+func (c *FakeImageTags) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(imagetagsResource, c.ns, name, opts), &imagev1.ImageTag{}) + + return err +} diff --git a/vendor/github.com/openshift/client-go/image/informers/externalversions/factory.go b/vendor/github.com/openshift/client-go/image/informers/externalversions/factory.go new file mode 100644 index 0000000000..067795180f --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/informers/externalversions/factory.go @@ -0,0 +1,164 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package externalversions + +import ( + reflect "reflect" + sync "sync" + time "time" + + versioned "github.com/openshift/client-go/image/clientset/versioned" + image "github.com/openshift/client-go/image/informers/externalversions/image" + internalinterfaces "github.com/openshift/client-go/image/informers/externalversions/internalinterfaces" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// SharedInformerOption defines the functional option type for SharedInformerFactory. +type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory + +type sharedInformerFactory struct { + client versioned.Interface + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc + lock sync.Mutex + defaultResync time.Duration + customResync map[reflect.Type]time.Duration + + informers map[reflect.Type]cache.SharedIndexInformer + // startedInformers is used for tracking which informers have been started. + // This allows Start() to be called multiple times safely. + startedInformers map[reflect.Type]bool +} + +// WithCustomResyncConfig sets a custom resync period for the specified informer types. 
+func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + for k, v := range resyncConfig { + factory.customResync[reflect.TypeOf(k)] = v + } + return factory + } +} + +// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory. +func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.tweakListOptions = tweakListOptions + return factory + } +} + +// WithNamespace limits the SharedInformerFactory to the specified namespace. +func WithNamespace(namespace string) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.namespace = namespace + return factory + } +} + +// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. +func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync) +} + +// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory. +// Listers obtained via this SharedInformerFactory will be subject to the same filters +// as specified here. +// Deprecated: Please use NewSharedInformerFactoryWithOptions instead +func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions)) +} + +// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options. 
+func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory { + factory := &sharedInformerFactory{ + client: client, + namespace: v1.NamespaceAll, + defaultResync: defaultResync, + informers: make(map[reflect.Type]cache.SharedIndexInformer), + startedInformers: make(map[reflect.Type]bool), + customResync: make(map[reflect.Type]time.Duration), + } + + // Apply all options + for _, opt := range options { + factory = opt(factory) + } + + return factory +} + +// Start initializes all requested informers. +func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) { + f.lock.Lock() + defer f.lock.Unlock() + + for informerType, informer := range f.informers { + if !f.startedInformers[informerType] { + go informer.Run(stopCh) + f.startedInformers[informerType] = true + } + } +} + +// WaitForCacheSync waits for all started informers' cache were synced. +func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool { + informers := func() map[reflect.Type]cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informers := map[reflect.Type]cache.SharedIndexInformer{} + for informerType, informer := range f.informers { + if f.startedInformers[informerType] { + informers[informerType] = informer + } + } + return informers + }() + + res := map[reflect.Type]bool{} + for informType, informer := range informers { + res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced) + } + return res +} + +// InternalInformerFor returns the SharedIndexInformer for obj using an internal +// client. 
+func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informerType := reflect.TypeOf(obj) + informer, exists := f.informers[informerType] + if exists { + return informer + } + + resyncPeriod, exists := f.customResync[informerType] + if !exists { + resyncPeriod = f.defaultResync + } + + informer = newFunc(f.client, resyncPeriod) + f.informers[informerType] = informer + + return informer +} + +// SharedInformerFactory provides shared informers for resources in all known +// API group versions. +type SharedInformerFactory interface { + internalinterfaces.SharedInformerFactory + ForResource(resource schema.GroupVersionResource) (GenericInformer, error) + WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool + + Image() image.Interface +} + +func (f *sharedInformerFactory) Image() image.Interface { + return image.New(f, f.namespace, f.tweakListOptions) +} diff --git a/vendor/github.com/openshift/client-go/image/informers/externalversions/generic.go b/vendor/github.com/openshift/client-go/image/informers/externalversions/generic.go new file mode 100644 index 0000000000..55f59dedef --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/informers/externalversions/generic.go @@ -0,0 +1,48 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package externalversions + +import ( + "fmt" + + v1 "github.com/openshift/api/image/v1" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// GenericInformer is type of SharedIndexInformer which will locate and delegate to other +// sharedInformers based on type +type GenericInformer interface { + Informer() cache.SharedIndexInformer + Lister() cache.GenericLister +} + +type genericInformer struct { + informer cache.SharedIndexInformer + resource schema.GroupResource +} + +// Informer returns the SharedIndexInformer. 
+func (f *genericInformer) Informer() cache.SharedIndexInformer { + return f.informer +} + +// Lister returns the GenericLister. +func (f *genericInformer) Lister() cache.GenericLister { + return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource) +} + +// ForResource gives generic access to a shared informer of the matching type +// TODO extend this to unknown resources with a client pool +func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { + switch resource { + // Group=image.openshift.io, Version=v1 + case v1.SchemeGroupVersion.WithResource("images"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Image().V1().Images().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("imagestreams"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Image().V1().ImageStreams().Informer()}, nil + + } + + return nil, fmt.Errorf("no informer found for %v", resource) +} diff --git a/vendor/github.com/openshift/client-go/image/informers/externalversions/image/interface.go b/vendor/github.com/openshift/client-go/image/informers/externalversions/image/interface.go new file mode 100644 index 0000000000..092550ed3d --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/informers/externalversions/image/interface.go @@ -0,0 +1,30 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package image + +import ( + v1 "github.com/openshift/client-go/image/informers/externalversions/image/v1" + internalinterfaces "github.com/openshift/client-go/image/informers/externalversions/internalinterfaces" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1 provides access to shared informers for resources in V1. 
+ V1() v1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1 returns a new v1.Interface. +func (g *group) V1() v1.Interface { + return v1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/github.com/openshift/client-go/image/informers/externalversions/image/v1/image.go b/vendor/github.com/openshift/client-go/image/informers/externalversions/image/v1/image.go new file mode 100644 index 0000000000..ee2d0a7067 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/informers/externalversions/image/v1/image.go @@ -0,0 +1,73 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + imagev1 "github.com/openshift/api/image/v1" + versioned "github.com/openshift/client-go/image/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/image/informers/externalversions/internalinterfaces" + v1 "github.com/openshift/client-go/image/listers/image/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// ImageInformer provides access to a shared informer and lister for +// Images. +type ImageInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ImageLister +} + +type imageInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewImageInformer constructs a new informer for Image type. 
+// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewImageInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredImageInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredImageInformer constructs a new informer for Image type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredImageInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ImageV1().Images().List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ImageV1().Images().Watch(context.TODO(), options) + }, + }, + &imagev1.Image{}, + resyncPeriod, + indexers, + ) +} + +func (f *imageInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredImageInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *imageInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&imagev1.Image{}, f.defaultInformer) +} + +func (f *imageInformer) Lister() v1.ImageLister { + return v1.NewImageLister(f.Informer().GetIndexer()) +} diff --git 
a/vendor/github.com/openshift/client-go/image/informers/externalversions/image/v1/imagestream.go b/vendor/github.com/openshift/client-go/image/informers/externalversions/image/v1/imagestream.go new file mode 100644 index 0000000000..4a94cc5c7d --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/informers/externalversions/image/v1/imagestream.go @@ -0,0 +1,74 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + imagev1 "github.com/openshift/api/image/v1" + versioned "github.com/openshift/client-go/image/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/image/informers/externalversions/internalinterfaces" + v1 "github.com/openshift/client-go/image/listers/image/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// ImageStreamInformer provides access to a shared informer and lister for +// ImageStreams. +type ImageStreamInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ImageStreamLister +} + +type imageStreamInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewImageStreamInformer constructs a new informer for ImageStream type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewImageStreamInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredImageStreamInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredImageStreamInformer constructs a new informer for ImageStream type. 
+// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredImageStreamInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ImageV1().ImageStreams(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ImageV1().ImageStreams(namespace).Watch(context.TODO(), options) + }, + }, + &imagev1.ImageStream{}, + resyncPeriod, + indexers, + ) +} + +func (f *imageStreamInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredImageStreamInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *imageStreamInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&imagev1.ImageStream{}, f.defaultInformer) +} + +func (f *imageStreamInformer) Lister() v1.ImageStreamLister { + return v1.NewImageStreamLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/image/informers/externalversions/image/v1/interface.go b/vendor/github.com/openshift/client-go/image/informers/externalversions/image/v1/interface.go new file mode 100644 index 0000000000..fd35c4df1a --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/informers/externalversions/image/v1/interface.go @@ -0,0 +1,36 @@ +// Code generated by informer-gen. DO NOT EDIT. 
+ +package v1 + +import ( + internalinterfaces "github.com/openshift/client-go/image/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // Images returns a ImageInformer. + Images() ImageInformer + // ImageStreams returns a ImageStreamInformer. + ImageStreams() ImageStreamInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// Images returns a ImageInformer. +func (v *version) Images() ImageInformer { + return &imageInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// ImageStreams returns a ImageStreamInformer. +func (v *version) ImageStreams() ImageStreamInformer { + return &imageStreamInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/github.com/openshift/client-go/image/informers/externalversions/internalinterfaces/factory_interfaces.go b/vendor/github.com/openshift/client-go/image/informers/externalversions/internalinterfaces/factory_interfaces.go new file mode 100644 index 0000000000..c35dcbfa44 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -0,0 +1,24 @@ +// Code generated by informer-gen. DO NOT EDIT. 
+ +package internalinterfaces + +import ( + time "time" + + versioned "github.com/openshift/client-go/image/clientset/versioned" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + cache "k8s.io/client-go/tools/cache" +) + +// NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer. +type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer + +// SharedInformerFactory a small interface to allow for adding an informer without an import cycle +type SharedInformerFactory interface { + Start(stopCh <-chan struct{}) + InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer +} + +// TweakListOptionsFunc is a function that transforms a v1.ListOptions. +type TweakListOptionsFunc func(*v1.ListOptions) diff --git a/vendor/github.com/openshift/client-go/image/listers/image/v1/expansion_generated.go b/vendor/github.com/openshift/client-go/image/listers/image/v1/expansion_generated.go new file mode 100644 index 0000000000..308b6db702 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/listers/image/v1/expansion_generated.go @@ -0,0 +1,31 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +// ImageListerExpansion allows custom methods to be added to +// ImageLister. +type ImageListerExpansion interface{} + +// ImageStreamListerExpansion allows custom methods to be added to +// ImageStreamLister. +type ImageStreamListerExpansion interface{} + +// ImageStreamNamespaceListerExpansion allows custom methods to be added to +// ImageStreamNamespaceLister. +type ImageStreamNamespaceListerExpansion interface{} + +// ImageStreamTagListerExpansion allows custom methods to be added to +// ImageStreamTagLister. +type ImageStreamTagListerExpansion interface{} + +// ImageStreamTagNamespaceListerExpansion allows custom methods to be added to +// ImageStreamTagNamespaceLister. 
+type ImageStreamTagNamespaceListerExpansion interface{} + +// ImageTagListerExpansion allows custom methods to be added to +// ImageTagLister. +type ImageTagListerExpansion interface{} + +// ImageTagNamespaceListerExpansion allows custom methods to be added to +// ImageTagNamespaceLister. +type ImageTagNamespaceListerExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/image/listers/image/v1/image.go b/vendor/github.com/openshift/client-go/image/listers/image/v1/image.go new file mode 100644 index 0000000000..bb66460a77 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/listers/image/v1/image.go @@ -0,0 +1,52 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/api/image/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ImageLister helps list Images. +// All objects returned here must be treated as read-only. +type ImageLister interface { + // List lists all Images in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.Image, err error) + // Get retrieves the Image from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.Image, error) + ImageListerExpansion +} + +// imageLister implements the ImageLister interface. +type imageLister struct { + indexer cache.Indexer +} + +// NewImageLister returns a new ImageLister. +func NewImageLister(indexer cache.Indexer) ImageLister { + return &imageLister{indexer: indexer} +} + +// List lists all Images in the indexer. +func (s *imageLister) List(selector labels.Selector) (ret []*v1.Image, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Image)) + }) + return ret, err +} + +// Get retrieves the Image from the index for a given name. 
+func (s *imageLister) Get(name string) (*v1.Image, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("image"), name) + } + return obj.(*v1.Image), nil +} diff --git a/vendor/github.com/openshift/client-go/image/listers/image/v1/imagestream.go b/vendor/github.com/openshift/client-go/image/listers/image/v1/imagestream.go new file mode 100644 index 0000000000..02ed4da365 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/listers/image/v1/imagestream.go @@ -0,0 +1,83 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/api/image/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ImageStreamLister helps list ImageStreams. +// All objects returned here must be treated as read-only. +type ImageStreamLister interface { + // List lists all ImageStreams in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.ImageStream, err error) + // ImageStreams returns an object that can list and get ImageStreams. + ImageStreams(namespace string) ImageStreamNamespaceLister + ImageStreamListerExpansion +} + +// imageStreamLister implements the ImageStreamLister interface. +type imageStreamLister struct { + indexer cache.Indexer +} + +// NewImageStreamLister returns a new ImageStreamLister. +func NewImageStreamLister(indexer cache.Indexer) ImageStreamLister { + return &imageStreamLister{indexer: indexer} +} + +// List lists all ImageStreams in the indexer. +func (s *imageStreamLister) List(selector labels.Selector) (ret []*v1.ImageStream, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ImageStream)) + }) + return ret, err +} + +// ImageStreams returns an object that can list and get ImageStreams. 
+func (s *imageStreamLister) ImageStreams(namespace string) ImageStreamNamespaceLister { + return imageStreamNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ImageStreamNamespaceLister helps list and get ImageStreams. +// All objects returned here must be treated as read-only. +type ImageStreamNamespaceLister interface { + // List lists all ImageStreams in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.ImageStream, err error) + // Get retrieves the ImageStream from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.ImageStream, error) + ImageStreamNamespaceListerExpansion +} + +// imageStreamNamespaceLister implements the ImageStreamNamespaceLister +// interface. +type imageStreamNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all ImageStreams in the indexer for a given namespace. +func (s imageStreamNamespaceLister) List(selector labels.Selector) (ret []*v1.ImageStream, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ImageStream)) + }) + return ret, err +} + +// Get retrieves the ImageStream from the indexer for a given namespace and name. 
+func (s imageStreamNamespaceLister) Get(name string) (*v1.ImageStream, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("imagestream"), name) + } + return obj.(*v1.ImageStream), nil +} diff --git a/vendor/github.com/openshift/client-go/image/listers/image/v1/imagestreamtag.go b/vendor/github.com/openshift/client-go/image/listers/image/v1/imagestreamtag.go new file mode 100644 index 0000000000..6042b27bbe --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/listers/image/v1/imagestreamtag.go @@ -0,0 +1,83 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/api/image/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ImageStreamTagLister helps list ImageStreamTags. +// All objects returned here must be treated as read-only. +type ImageStreamTagLister interface { + // List lists all ImageStreamTags in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.ImageStreamTag, err error) + // ImageStreamTags returns an object that can list and get ImageStreamTags. + ImageStreamTags(namespace string) ImageStreamTagNamespaceLister + ImageStreamTagListerExpansion +} + +// imageStreamTagLister implements the ImageStreamTagLister interface. +type imageStreamTagLister struct { + indexer cache.Indexer +} + +// NewImageStreamTagLister returns a new ImageStreamTagLister. +func NewImageStreamTagLister(indexer cache.Indexer) ImageStreamTagLister { + return &imageStreamTagLister{indexer: indexer} +} + +// List lists all ImageStreamTags in the indexer. 
+func (s *imageStreamTagLister) List(selector labels.Selector) (ret []*v1.ImageStreamTag, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ImageStreamTag)) + }) + return ret, err +} + +// ImageStreamTags returns an object that can list and get ImageStreamTags. +func (s *imageStreamTagLister) ImageStreamTags(namespace string) ImageStreamTagNamespaceLister { + return imageStreamTagNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ImageStreamTagNamespaceLister helps list and get ImageStreamTags. +// All objects returned here must be treated as read-only. +type ImageStreamTagNamespaceLister interface { + // List lists all ImageStreamTags in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.ImageStreamTag, err error) + // Get retrieves the ImageStreamTag from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.ImageStreamTag, error) + ImageStreamTagNamespaceListerExpansion +} + +// imageStreamTagNamespaceLister implements the ImageStreamTagNamespaceLister +// interface. +type imageStreamTagNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all ImageStreamTags in the indexer for a given namespace. +func (s imageStreamTagNamespaceLister) List(selector labels.Selector) (ret []*v1.ImageStreamTag, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ImageStreamTag)) + }) + return ret, err +} + +// Get retrieves the ImageStreamTag from the indexer for a given namespace and name. 
+func (s imageStreamTagNamespaceLister) Get(name string) (*v1.ImageStreamTag, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("imagestreamtag"), name) + } + return obj.(*v1.ImageStreamTag), nil +} diff --git a/vendor/github.com/openshift/client-go/image/listers/image/v1/imagetag.go b/vendor/github.com/openshift/client-go/image/listers/image/v1/imagetag.go new file mode 100644 index 0000000000..bbc4518c23 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/listers/image/v1/imagetag.go @@ -0,0 +1,83 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/api/image/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ImageTagLister helps list ImageTags. +// All objects returned here must be treated as read-only. +type ImageTagLister interface { + // List lists all ImageTags in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.ImageTag, err error) + // ImageTags returns an object that can list and get ImageTags. + ImageTags(namespace string) ImageTagNamespaceLister + ImageTagListerExpansion +} + +// imageTagLister implements the ImageTagLister interface. +type imageTagLister struct { + indexer cache.Indexer +} + +// NewImageTagLister returns a new ImageTagLister. +func NewImageTagLister(indexer cache.Indexer) ImageTagLister { + return &imageTagLister{indexer: indexer} +} + +// List lists all ImageTags in the indexer. +func (s *imageTagLister) List(selector labels.Selector) (ret []*v1.ImageTag, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ImageTag)) + }) + return ret, err +} + +// ImageTags returns an object that can list and get ImageTags. 
+func (s *imageTagLister) ImageTags(namespace string) ImageTagNamespaceLister { + return imageTagNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ImageTagNamespaceLister helps list and get ImageTags. +// All objects returned here must be treated as read-only. +type ImageTagNamespaceLister interface { + // List lists all ImageTags in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.ImageTag, err error) + // Get retrieves the ImageTag from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.ImageTag, error) + ImageTagNamespaceListerExpansion +} + +// imageTagNamespaceLister implements the ImageTagNamespaceLister +// interface. +type imageTagNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all ImageTags in the indexer for a given namespace. +func (s imageTagNamespaceLister) List(selector labels.Selector) (ret []*v1.ImageTag, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ImageTag)) + }) + return ret, err +} + +// Get retrieves the ImageTag from the indexer for a given namespace and name. 
+func (s imageTagNamespaceLister) Get(name string) (*v1.ImageTag, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("imagetag"), name) + } + return obj.(*v1.ImageTag), nil +} diff --git a/vendor/k8s.io/code-generator/generate-groups.sh b/vendor/k8s.io/code-generator/generate-groups.sh old mode 100755 new mode 100644 diff --git a/vendor/k8s.io/code-generator/generate-internal-groups.sh b/vendor/k8s.io/code-generator/generate-internal-groups.sh old mode 100755 new mode 100644 diff --git a/vendor/modules.txt b/vendor/modules.txt index 5a1a7f00de..0c514635d6 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -852,8 +852,16 @@ github.com/openshift/api/user/v1 ## explicit; go 1.19 github.com/openshift/client-go/build/applyconfigurations/build/v1 github.com/openshift/client-go/build/applyconfigurations/internal +github.com/openshift/client-go/build/clientset/versioned +github.com/openshift/client-go/build/clientset/versioned/fake github.com/openshift/client-go/build/clientset/versioned/scheme github.com/openshift/client-go/build/clientset/versioned/typed/build/v1 +github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/fake +github.com/openshift/client-go/build/informers/externalversions +github.com/openshift/client-go/build/informers/externalversions/build +github.com/openshift/client-go/build/informers/externalversions/build/v1 +github.com/openshift/client-go/build/informers/externalversions/internalinterfaces +github.com/openshift/client-go/build/listers/build/v1 github.com/openshift/client-go/config/applyconfigurations/config/v1 github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1 github.com/openshift/client-go/config/applyconfigurations/internal @@ -873,8 +881,16 @@ github.com/openshift/client-go/config/listers/config/v1 github.com/openshift/client-go/config/listers/config/v1alpha1 
github.com/openshift/client-go/image/applyconfigurations/image/v1 github.com/openshift/client-go/image/applyconfigurations/internal +github.com/openshift/client-go/image/clientset/versioned +github.com/openshift/client-go/image/clientset/versioned/fake github.com/openshift/client-go/image/clientset/versioned/scheme github.com/openshift/client-go/image/clientset/versioned/typed/image/v1 +github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake +github.com/openshift/client-go/image/informers/externalversions +github.com/openshift/client-go/image/informers/externalversions/image +github.com/openshift/client-go/image/informers/externalversions/image/v1 +github.com/openshift/client-go/image/informers/externalversions/internalinterfaces +github.com/openshift/client-go/image/listers/image/v1 github.com/openshift/client-go/operator/applyconfigurations/internal github.com/openshift/client-go/operator/applyconfigurations/operator/v1 github.com/openshift/client-go/operator/applyconfigurations/operator/v1alpha1