diff --git a/internal/clients/builder.go b/internal/clients/builder.go index ea3d0aff26..561774d0c0 100644 --- a/internal/clients/builder.go +++ b/internal/clients/builder.go @@ -3,7 +3,9 @@ package clients import ( "os" + buildclientset "github.com/openshift/client-go/build/clientset/versioned" configclientset "github.com/openshift/client-go/config/clientset/versioned" + imageclientset "github.com/openshift/client-go/image/clientset/versioned" operatorclientset "github.com/openshift/client-go/operator/clientset/versioned" mcfgclientset "github.com/openshift/machine-config-operator/pkg/generated/clientset/versioned" apiext "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" @@ -56,6 +58,14 @@ func (cb *Builder) APIExtClientOrDie(name string) apiext.Interface { return apiext.NewForConfigOrDie(rest.AddUserAgent(cb.config, name)) } +func (cb *Builder) BuildClientOrDie(name string) buildclientset.Interface { + return buildclientset.NewForConfigOrDie(rest.AddUserAgent(cb.config, name)) +} + +func (cb *Builder) ImageClientOrDie(name string) imageclientset.Interface { + return imageclientset.NewForConfigOrDie(rest.AddUserAgent(cb.config, name)) +} + // GetBuilderConfig returns a copy of the builders *rest.Config func (cb *Builder) GetBuilderConfig() *rest.Config { return rest.CopyConfig(cb.config) diff --git a/pkg/apis/machineconfiguration.openshift.io/v1/types.go b/pkg/apis/machineconfiguration.openshift.io/v1/types.go index 8cfa3ba29d..3c780e7d5b 100644 --- a/pkg/apis/machineconfiguration.openshift.io/v1/types.go +++ b/pkg/apis/machineconfiguration.openshift.io/v1/types.go @@ -358,6 +358,14 @@ const ( // MachineConfigPoolDegraded is the overall status of the pool based, today, on whether we fail with NodeDegraded or RenderDegraded MachineConfigPoolDegraded MachineConfigPoolConditionType = "Degraded" + + MachineConfigPoolBuildPending MachineConfigPoolConditionType = "BuildPending" + + MachineConfigPoolBuilding MachineConfigPoolConditionType = "Building" + + MachineConfigPoolBuildSuccess MachineConfigPoolConditionType = "BuildSuccess" + + MachineConfigPoolBuildFailed MachineConfigPoolConditionType = "BuildFailed" ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/pkg/controller/build/assets/Dockerfile.on-cluster-build-template b/pkg/controller/build/assets/Dockerfile.on-cluster-build-template new file mode 100644 index 0000000000..24ed777b95 --- /dev/null +++ b/pkg/controller/build/assets/Dockerfile.on-cluster-build-template @@ -0,0 +1,33 @@ +# This Dockerfile is not intended to be directly built. Instead, it is embedded +# within the Build Controller binary (see //go:embed) and templatized with +# certain options around base image pullspecs. +# +# Decode and extract the MachineConfig from the gzipped ConfigMap and move it +# into position. We do this in a separate stage so that we don't have the +# gzipped MachineConfig laying around. +FROM {{.BaseImage.Pullspec}} AS extract +COPY ./machineconfig/machineconfig.json.gz /tmp/machineconfig.json.gz +RUN mkdir -p /etc/machine-config-daemon && \ + cat /tmp/machineconfig.json.gz | base64 -d | gunzip - > /etc/machine-config-daemon/currentconfig + +{{if .ExtensionsImage.Pullspec}} +# Pull our extensions image. Not sure yet what / how this should be wired up +# though. Ideally, I'd like to use some Buildah tricks to have the extensions +# directory mounted into the container at build-time so that I don't have to +# copy the RPMs into the container, configure the repo, and do the +# installation. 
Alternatively, I'd have to start a pod with an HTTP server. +FROM {{.ExtensionsImage.Pullspec}} AS extensions +{{end}} + + +FROM {{.BaseImage.Pullspec}} AS final +# Copy the extracted MachineConfig into the expected place in the image. +COPY --from=extract /etc/machine-config-daemon/currentconfig /etc/machine-config-daemon/currentconfig +# Do the ignition live-apply, extracting the Ignition config from the MachineConfig. +RUN exec -a ignition-apply /usr/lib/dracut/modules.d/30ignition/ignition --ignore-unsupported <(cat /etc/machine-config-daemon/currentconfig | jq '.spec.config') && \ + ostree container commit + +LABEL machineconfig={{.Pool.Spec.Configuration.Name}} +LABEL machineconfigpool={{.Pool.Name}} +LABEL releaseversion={{.ReleaseVersion}} +LABEL baseOSContainerImage={{.BaseImage.Pullspec}} diff --git a/pkg/controller/build/assets/README.md b/pkg/controller/build/assets/README.md new file mode 100644 index 0000000000..9b4c75a333 --- /dev/null +++ b/pkg/controller/build/assets/README.md @@ -0,0 +1,5 @@ +# assets + +These files get embedded within the Go binary and are not intended for direct +use. In particular, the Dockerfile is interspersed with Go templates and will +not build unless rendered with a tool such as [Gomplate](https://github.com/hairyhenderson/gomplate). diff --git a/pkg/controller/build/assets/buildah-build.sh b/pkg/controller/build/assets/buildah-build.sh new file mode 100644 index 0000000000..e57bdcdad9 --- /dev/null +++ b/pkg/controller/build/assets/buildah-build.sh @@ -0,0 +1,29 @@ +#!/usr/bin/env bash +# +# This script is not meant to be directly executed. Instead, it is embedded +# within the Build Controller binary (see //go:embed) and injected into a +# custom build pod. +set -xeuo + +build_context="$HOME/context" + +# Create a directory to hold our build context. +mkdir -p "$build_context/machineconfig" + +# Copy the Dockerfile and Machineconfigs from configmaps into our build context. +cp /tmp/dockerfile/Dockerfile "$build_context" +cp /tmp/machineconfig/machineconfig.json.gz "$build_context/machineconfig/" + +# Build our image using Buildah. +buildah bud \ + --storage-driver vfs \ + --authfile="$BASE_IMAGE_PULL_CREDS" \ + --tag "$TAG" \ + --file="$build_context/Dockerfile" "$build_context" + +# Push our built image. +buildah push \ + --storage-driver vfs \ + --authfile="$FINAL_IMAGE_PUSH_CREDS" \ + --digestfile="/tmp/done/digestfile" \ + --cert-dir /var/run/secrets/kubernetes.io/serviceaccount "$TAG" diff --git a/pkg/controller/build/assets/podman-build.sh b/pkg/controller/build/assets/podman-build.sh new file mode 100644 index 0000000000..882c4f5ef9 --- /dev/null +++ b/pkg/controller/build/assets/podman-build.sh @@ -0,0 +1,35 @@ +#!/usr/bin/env bash +# +# This script is not meant to be directly executed. Instead, it is embedded +# within the Build Controller binary (see //go:embed) and injected into a +# custom build pod. +set -xeuo + +build_context="/tmp/context" + +# Create a directory to hold our build context. +mkdir -p "$build_context/machineconfig" + +# Copy the Dockerfile and Machineconfigs from configmaps into our build context. +cp /tmp/dockerfile/Dockerfile "$build_context" +cp /tmp/machineconfig/machineconfig.json.gz "$build_context/machineconfig/" + +# Build our image using Buildah. +podman build \ + --storage-driver vfs \ + --authfile="$BASE_IMAGE_PULL_CREDS" \ + --tag "$TAG" \ + --file="$build_context/Dockerfile" "$build_context" + +# Push our built image. 
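+# --digestfile records the sha256 digest of the pushed image so it can be
+# stored in a ConfigMap below and retrieved later.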
+podman push \ + --storage-driver vfs \ + --authfile="$FINAL_IMAGE_PUSH_CREDS" \ + --digestfile="/tmp/digestfile" \ + --cert-dir /var/run/secrets/kubernetes.io/serviceaccount "$TAG" + +# Store the digestfile in a configmap for future retrieval. +oc create configmap \ + "$DIGEST_CONFIGMAP_NAME" \ + --namespace openshift-machine-config-operator \ + --from-file=digest=/tmp/digestfile diff --git a/pkg/controller/build/assets/wait.sh b/pkg/controller/build/assets/wait.sh new file mode 100644 index 0000000000..bf1675fe63 --- /dev/null +++ b/pkg/controller/build/assets/wait.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash +# +# This script is not meant to be directly executed. Instead, it is embedded +# within the Build Controller binary (see //go:embed) and injected into a +# custom build pod. + +# Wait until the done file appears. +while [ ! -f "/tmp/done/digestfile" ] +do + sleep 1 +done + +oc create configmap \ + "$DIGEST_CONFIGMAP_NAME" \ + --namespace openshift-machine-config-operator \ + --from-file=digest=/tmp/done/digestfile diff --git a/pkg/controller/build/build_controller.go b/pkg/controller/build/build_controller.go new file mode 100644 index 0000000000..10343ed083 --- /dev/null +++ b/pkg/controller/build/build_controller.go @@ -0,0 +1,1297 @@ +package build + +import ( + "bytes" + "context" + "fmt" + "strings" + "time" + + "github.com/containers/image/v5/docker/reference" + buildv1 "github.com/openshift/api/build/v1" + mcfgv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" + "github.com/openshift/machine-config-operator/pkg/generated/clientset/versioned/scheme" + corev1 "k8s.io/api/core/v1" + aggerrors "k8s.io/apimachinery/pkg/util/errors" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + clientset "k8s.io/client-go/kubernetes" + coreclientsetv1 "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog/v2" + + buildinformers "github.com/openshift/client-go/build/informers/externalversions" + + buildinformersv1 "github.com/openshift/client-go/build/informers/externalversions/build/v1" + + buildclientset "github.com/openshift/client-go/build/clientset/versioned" + + mcfgclientset "github.com/openshift/machine-config-operator/pkg/generated/clientset/versioned" + mcfginformers "github.com/openshift/machine-config-operator/pkg/generated/informers/externalversions" + + mcfginformersv1 "github.com/openshift/machine-config-operator/pkg/generated/informers/externalversions/machineconfiguration.openshift.io/v1" + mcfglistersv1 "github.com/openshift/machine-config-operator/pkg/generated/listers/machineconfiguration.openshift.io/v1" + + coreinformers "k8s.io/client-go/informers" + coreinformersv1 "k8s.io/client-go/informers/core/v1" + + ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + k8serrors "k8s.io/apimachinery/pkg/api/errors" + + "github.com/openshift/machine-config-operator/internal/clients" +) + +const ( + targetMachineConfigPoolLabel = "machineconfiguration.openshift.io/targetMachineConfigPool" + // TODO(zzlotnik): Is there a constant for this someplace else? + desiredConfigLabel = "machineconfiguration.openshift.io/desiredConfig" +) + +// on-cluster-build-config ConfigMap keys. +const ( + // Name of ConfigMap which contains knobs for configuring the build controller. 
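+ // This ConfigMap lives in the MCO namespace (openshift-machine-config-operator).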
+ onClusterBuildConfigMapName = "on-cluster-build-config" + + // The on-cluster-build-config ConfigMap key which contains a K8s secret capable of pulling of the base OS image. + baseImagePullSecretNameConfigKey = "baseImagePullSecretName" + + // The on-cluster-build-config ConfigMap key which contains a K8s secret capable of pushing the final OS image. + finalImagePushSecretNameConfigKey = "finalImagePushSecretName" + + // The on-cluster-build-config ConfigMap key which contains the pullspec of where to push the final OS image (e.g., registry.hostname.com/org/repo:tag). + finalImagePullspecConfigKey = "finalImagePullspec" +) + +// machine-config-osimageurl ConfigMap keys. +const ( + // TODO: Is this a constant someplace else? + machineConfigOSImageURLConfigMapName = "machine-config-osimageurl" + + // The machine-config-osimageurl ConfigMap key which contains the pullspec of the base OS image (e.g., registry.hostname.com/org/repo:tag). + baseOSContainerImageConfigKey = "baseOSContainerImage" + + // The machine-config-osimageurl ConfigMap key which contains the pullspec of the base OS image (e.g., registry.hostname.com/org/repo:tag). + baseOSExtensionsContainerImageConfigKey = "baseOSExtensionsContainerImage" + + // The machine-config-osimageurl ConfigMap key which contains the current OpenShift release version. + releaseVersionConfigKey = "releaseVersion" + + // The machine-config-osimageurl ConfigMap key which contains the osImageURL + // value. I don't think we actually use this anywhere though. + osImageURLConfigKey = "osImageURL" +) + +var ( + // controllerKind contains the schema.GroupVersionKind for this controller type. + //nolint:varcheck,deadcode // This will be used eventually + controllerKind = mcfgv1.SchemeGroupVersion.WithKind("MachineConfigPool") +) + +//nolint:revive // If I name this ControllerConfig, that name will be overloaded :P +type BuildControllerConfig struct { + // updateDelay is a pause to deal with churn in MachineConfigs; see + // https://github.com/openshift/machine-config-operator/issues/301 + // Default: 5 seconds + UpdateDelay time.Duration + + // maxRetries is the number of times a machineconfig pool will be retried before it is dropped out of the queue. + // With the current rate-limiter in use (5ms*2^(maxRetries-1)) the following numbers represent the times + // a machineconfig pool is going to be requeued: + // + // 5ms, 10ms, 20ms, 40ms, 80ms, 160ms, 320ms, 640ms, 1.3s, 2.6s, 5.1s, 10.2s, 20.4s, 41s, 82s + // Default: 5 + MaxRetries int +} + +type ImageBuilder interface { + Run(context.Context, int) + StartBuild(ImageBuildRequest) (*corev1.ObjectReference, error) + IsBuildRunning(*mcfgv1.MachineConfigPool) (bool, error) + DeleteBuildObject(*mcfgv1.MachineConfigPool) error + FinalPullspec(*mcfgv1.MachineConfigPool) (string, error) +} + +// Controller defines the build controller. +type Controller struct { + *Clients + *informers + + eventRecorder record.EventRecorder + + syncHandler func(mcp string) error + enqueueMachineConfigPool func(*mcfgv1.MachineConfigPool) + + ccLister mcfglistersv1.ControllerConfigLister + mcpLister mcfglistersv1.MachineConfigPoolLister + + ccListerSynced cache.InformerSynced + mcpListerSynced cache.InformerSynced + podListerSynced cache.InformerSynced + + queue workqueue.RateLimitingInterface + + config BuildControllerConfig + imageBuilder ImageBuilder +} + +// Creates a BuildControllerConfig with sensible production defaults. 
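+// These mirror the documented defaults above: a 5-second update delay and a
+// maximum of 5 retries before a pool is dropped from the queue.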
+func DefaultBuildControllerConfig() BuildControllerConfig { + return BuildControllerConfig{ + MaxRetries: 5, + UpdateDelay: time.Second * 5, + } +} + +// Holds each of the clients used by the Build Controller and its subcontrollers. +type Clients struct { + mcfgclient mcfgclientset.Interface + kubeclient clientset.Interface + buildclient buildclientset.Interface +} + +func NewClientsFromControllerContext(ctrlCtx *ctrlcommon.ControllerContext) *Clients { + return NewClients(ctrlCtx.ClientBuilder) +} + +func NewClients(cb *clients.Builder) *Clients { + return &Clients{ + mcfgclient: cb.MachineConfigClientOrDie("machine-os-builder"), + kubeclient: cb.KubeClientOrDie("machine-os-builder"), + buildclient: cb.BuildClientOrDie("machine-os-builder"), + } +} + +// Holds and starts each of the infomrers used by the Build Controller and its subcontrollers. +type informers struct { + ccInformer mcfginformersv1.ControllerConfigInformer + mcpInformer mcfginformersv1.MachineConfigPoolInformer + buildInformer buildinformersv1.BuildInformer + podInformer coreinformersv1.PodInformer + toStart []interface{ Start(<-chan struct{}) } +} + +// Starts the informers, wiring them up to the provided context. +func (i *informers) start(ctx context.Context) { + for _, startable := range i.toStart { + startable.Start(ctx.Done()) + } +} + +// Creates new informer instances from a given Clients(set). +func newInformers(bcc *Clients) *informers { + ccInformer := mcfginformers.NewSharedInformerFactory(bcc.mcfgclient, 0) + mcpInformer := mcfginformers.NewSharedInformerFactory(bcc.mcfgclient, 0) + buildInformer := buildinformers.NewSharedInformerFactoryWithOptions(bcc.buildclient, 0, buildinformers.WithNamespace(ctrlcommon.MCONamespace)) + podInformer := coreinformers.NewSharedInformerFactoryWithOptions(bcc.kubeclient, 0, coreinformers.WithNamespace(ctrlcommon.MCONamespace)) + + return &informers{ + ccInformer: ccInformer.Machineconfiguration().V1().ControllerConfigs(), + mcpInformer: mcpInformer.Machineconfiguration().V1().MachineConfigPools(), + buildInformer: buildInformer.Build().V1().Builds(), + podInformer: podInformer.Core().V1().Pods(), + toStart: []interface{ Start(<-chan struct{}) }{ + ccInformer, + mcpInformer, + buildInformer, + podInformer, + }, + } +} + +// Creates a basic Build Controller instance without configuring an ImageBuilder. 
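+// Callers should use NewWithCustomPodBuilder or NewWithImageBuilder, which
+// wrap this constructor and wire up the appropriate ImageBuilder implementation.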
+func newBuildController( + ctrlConfig BuildControllerConfig, + clients *Clients, +) *Controller { + eventBroadcaster := record.NewBroadcaster() + eventBroadcaster.StartLogging(klog.Infof) + eventBroadcaster.StartRecordingToSink(&coreclientsetv1.EventSinkImpl{Interface: clients.kubeclient.CoreV1().Events("")}) + + ctrl := &Controller{ + informers: newInformers(clients), + Clients: clients, + eventRecorder: eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: "machineosbuilder-buildcontroller"}), + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "machineosbuilder-buildcontroller"), + config: ctrlConfig, + } + + ctrl.mcpInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: ctrl.addMachineConfigPool, + UpdateFunc: ctrl.updateMachineConfigPool, + DeleteFunc: ctrl.deleteMachineConfigPool, + }) + + ctrl.syncHandler = ctrl.syncMachineConfigPool + ctrl.enqueueMachineConfigPool = ctrl.enqueueDefault + + ctrl.ccLister = ctrl.ccInformer.Lister() + ctrl.mcpLister = ctrl.mcpInformer.Lister() + + ctrl.ccListerSynced = ctrl.ccInformer.Informer().HasSynced + ctrl.mcpListerSynced = ctrl.mcpInformer.Informer().HasSynced + + return ctrl +} + +// Creates a Build Controller instance with a custom pod builder implementation +// for the ImageBuilder. +func NewWithCustomPodBuilder( + ctrlConfig BuildControllerConfig, + clients *Clients, +) *Controller { + ctrl := newBuildController(ctrlConfig, clients) + ctrl.imageBuilder = newPodBuildController(ctrlConfig, clients, ctrl.customBuildPodUpdater) + return ctrl +} + +// Creates a Build Controller instance with an OpenShift Image Builder +// implementation for the ImageBuilder. +func NewWithImageBuilder( + ctrlConfig BuildControllerConfig, + clients *Clients, +) *Controller { + ctrl := newBuildController(ctrlConfig, clients) + ctrl.imageBuilder = newImageBuildController(ctrlConfig, clients, ctrl.imageBuildUpdater) + return ctrl +} + +// Run executes the render controller. +// TODO: Make this use a context instead of a stop channel. +func (ctrl *Controller) Run(parentCtx context.Context, workers int) { + klog.Info("Starting MachineOSBuilder-BuildController") + defer klog.Info("Shutting down MachineOSBuilder-BuildController") + + // Not sure if I actually need a child context here or not. + ctx, cancel := context.WithCancel(parentCtx) + defer utilruntime.HandleCrash() + defer ctrl.queue.ShutDown() + defer cancel() + + ctrl.informers.start(ctx) + + if !cache.WaitForCacheSync(ctx.Done(), ctrl.mcpListerSynced, ctrl.ccListerSynced) { + return + } + + go ctrl.imageBuilder.Run(ctx, workers) + + for i := 0; i < workers; i++ { + go wait.Until(ctrl.worker, time.Second, ctx.Done()) + } + + <-ctx.Done() +} + +func (ctrl *Controller) enqueue(pool *mcfgv1.MachineConfigPool) { + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(pool) + if err != nil { + utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", pool, err)) + return + } + + ctrl.queue.Add(key) +} + +func (ctrl *Controller) enqueueRateLimited(pool *mcfgv1.MachineConfigPool) { + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(pool) + if err != nil { + utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", pool, err)) + return + } + + ctrl.queue.AddRateLimited(key) +} + +// enqueueAfter will enqueue a pool after the provided amount of time. 
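+// enqueueDefault uses this with UpdateDelay to debounce churny MachineConfig updates.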
+func (ctrl *Controller) enqueueAfter(pool *mcfgv1.MachineConfigPool, after time.Duration) { + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(pool) + if err != nil { + utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", pool, err)) + return + } + + ctrl.queue.AddAfter(key, after) +} + +// enqueueDefault calls a default enqueue function +func (ctrl *Controller) enqueueDefault(pool *mcfgv1.MachineConfigPool) { + ctrl.enqueueAfter(pool, ctrl.config.UpdateDelay) +} + +// worker runs a worker thread that just dequeues items, processes them, and marks them done. +// It enforces that the syncHandler is never invoked concurrently with the same key. +func (ctrl *Controller) worker() { + for ctrl.processNextWorkItem() { + } +} + +func (ctrl *Controller) processNextWorkItem() bool { + key, quit := ctrl.queue.Get() + if quit { + return false + } + defer ctrl.queue.Done(key) + + err := ctrl.syncHandler(key.(string)) + ctrl.handleErr(err, key) + + return true +} + +// Reconciles the MachineConfigPool state with the state of an OpenShift Image +// Builder object. +func (ctrl *Controller) imageBuildUpdater(build *buildv1.Build) error { + pool, err := ctrl.mcfgclient.MachineconfigurationV1().MachineConfigPools().Get(context.TODO(), build.Labels[targetMachineConfigPoolLabel], metav1.GetOptions{}) + if err != nil { + return err + } + + klog.Infof("Build (%s) is %s", build.Name, build.Status.Phase) + + objRef := toObjectRef(build) + + switch build.Status.Phase { + case buildv1.BuildPhaseNew, buildv1.BuildPhasePending: + if !mcfgv1.IsMachineConfigPoolConditionTrue(pool.Status.Conditions, mcfgv1.MachineConfigPoolBuildPending) { + err = ctrl.markBuildPendingWithObjectRef(pool, *objRef) + } + case buildv1.BuildPhaseRunning: + // If we're running, then there's nothing to do right now. + if !mcfgv1.IsMachineConfigPoolConditionTrue(pool.Status.Conditions, mcfgv1.MachineConfigPoolBuilding) { + err = ctrl.markBuildInProgress(pool) + } + case buildv1.BuildPhaseComplete: + // If we've succeeded, we need to update the pool to indicate that. + if !mcfgv1.IsMachineConfigPoolConditionTrue(pool.Status.Conditions, mcfgv1.MachineConfigPoolBuildSuccess) { + err = ctrl.markBuildSucceeded(pool) + } + case buildv1.BuildPhaseFailed, buildv1.BuildPhaseError, buildv1.BuildPhaseCancelled: + // If we've failed, errored, or cancelled, we need to update the pool to indicate that. + if !mcfgv1.IsMachineConfigPoolConditionTrue(pool.Status.Conditions, mcfgv1.MachineConfigPoolBuildFailed) { + err = ctrl.markBuildFailed(pool) + } + } + + if err != nil { + return err + } + + ctrl.enqueueMachineConfigPool(pool) + return nil +} + +// Reconciles the MachineConfigPool state with the state of a custom pod object. +func (ctrl *Controller) customBuildPodUpdater(pod *corev1.Pod) error { + pool, err := ctrl.mcfgclient.MachineconfigurationV1().MachineConfigPools().Get(context.TODO(), pod.Labels[targetMachineConfigPoolLabel], metav1.GetOptions{}) + if err != nil { + return err + } + + klog.Infof("Build pod (%s) is %s", pod.Name, pod.Status.Phase) + + switch pod.Status.Phase { + case corev1.PodPending: + if !mcfgv1.IsMachineConfigPoolConditionTrue(pool.Status.Conditions, mcfgv1.MachineConfigPoolBuildPending) { + objRef := toObjectRef(pod) + err = ctrl.markBuildPendingWithObjectRef(pool, *objRef) + } + case corev1.PodRunning: + // If we're running, then there's nothing to do right now. 
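+ // We only flip the condition once; later running-pod events are no-ops.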
+ if !mcfgv1.IsMachineConfigPoolConditionTrue(pool.Status.Conditions, mcfgv1.MachineConfigPoolBuilding) { + err = ctrl.markBuildInProgress(pool) + } + case corev1.PodSucceeded: + // If we've succeeded, we need to update the pool to indicate that. + if !mcfgv1.IsMachineConfigPoolConditionTrue(pool.Status.Conditions, mcfgv1.MachineConfigPoolBuildSuccess) { + err = ctrl.markBuildSucceeded(pool) + } + case corev1.PodFailed: + // If we've failed, we need to update the pool to indicate that. + if !mcfgv1.IsMachineConfigPoolConditionTrue(pool.Status.Conditions, mcfgv1.MachineConfigPoolBuildFailed) { + err = ctrl.markBuildFailed(pool) + } + } + + if err != nil { + return err + } + + ctrl.enqueueMachineConfigPool(pool) + return nil +} + +func (ctrl *Controller) handleErr(err error, key interface{}) { + if err == nil { + ctrl.queue.Forget(key) + return + } + + if ctrl.queue.NumRequeues(key) < ctrl.config.MaxRetries { + klog.V(2).Infof("Error syncing machineconfigpool %v: %v", key, err) + ctrl.queue.AddRateLimited(key) + return + } + + utilruntime.HandleError(err) + klog.V(2).Infof("Dropping machineconfigpool %q out of the queue: %v", key, err) + ctrl.queue.Forget(key) + ctrl.queue.AddAfter(key, 1*time.Minute) +} + +// syncMachineConfigPool will sync the machineconfig pool with the given key. +// This function is not meant to be invoked concurrently with the same key. +func (ctrl *Controller) syncMachineConfigPool(key string) error { + startTime := time.Now() + klog.V(4).Infof("Started syncing machineconfigpool %q (%v)", key, startTime) + defer func() { + klog.V(4).Infof("Finished syncing machineconfigpool %q (%v)", key, time.Since(startTime)) + }() + + _, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + return err + } + machineconfigpool, err := ctrl.mcpLister.Get(name) + if k8serrors.IsNotFound(err) { + klog.V(2).Infof("MachineConfigPool %v has been deleted", key) + return nil + } + if err != nil { + return err + } + + // TODO: Doing a deep copy of this pool object from our cache and using it to + // determine our next course of action sometimes causes a race condition. I'm + // not sure if it's better to get a current copy from the API server or what. + // pool := machineconfigpool.DeepCopy() + pool, err := ctrl.mcfgclient.MachineconfigurationV1().MachineConfigPools().Get(context.TODO(), machineconfigpool.Name, metav1.GetOptions{}) + if err != nil { + return err + } + + // Not a layered pool, so stop here. 
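+ // Pools must explicitly opt in to on-cluster layering; everything else is ignored by this controller.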
+ if !ctrlcommon.IsLayeredPool(pool) { + klog.V(4).Infof("MachineConfigPool %s is not opted-in for layering, ignoring", pool.Name) + return nil + } + + switch { + case mcfgv1.IsMachineConfigPoolConditionTrue(pool.Status.Conditions, mcfgv1.MachineConfigPoolDegraded): + klog.V(4).Infof("MachineConfigPool %s is degraded, requeueing", pool.Name) + ctrl.enqueueMachineConfigPool(pool) + return nil + case mcfgv1.IsMachineConfigPoolConditionTrue(pool.Status.Conditions, mcfgv1.MachineConfigPoolRenderDegraded): + klog.V(4).Infof("MachineConfigPool %s is render degraded, requeueing", pool.Name) + ctrl.enqueueMachineConfigPool(pool) + return nil + case mcfgv1.IsMachineConfigPoolConditionTrue(pool.Status.Conditions, mcfgv1.MachineConfigPoolBuildPending): + klog.V(4).Infof("MachineConfigPool %s is build pending", pool.Name) + return nil + case mcfgv1.IsMachineConfigPoolConditionTrue(pool.Status.Conditions, mcfgv1.MachineConfigPoolBuilding): + klog.V(4).Infof("MachineConfigPool %s is building", pool.Name) + return nil + case mcfgv1.IsMachineConfigPoolConditionTrue(pool.Status.Conditions, mcfgv1.MachineConfigPoolBuildSuccess): + klog.V(4).Infof("MachineConfigPool %s has successfully built", pool.Name) + return nil + default: + shouldBuild, err := shouldWeDoABuild(ctrl.imageBuilder, pool, pool) + if err != nil { + return fmt.Errorf("could not determine if a build is required for MachineConfigPool %q: %w", pool.Name, err) + } + + if shouldBuild { + return ctrl.startBuildForMachineConfigPool(pool) + } + + klog.V(4).Infof("Nothing to do for pool %q", pool.Name) + } + + // For everything else + return ctrl.syncAvailableStatus(pool) +} + +// Marks a given MachineConfigPool as a failed build. +func (ctrl *Controller) markBuildFailed(pool *mcfgv1.MachineConfigPool) error { + klog.Errorf("Build failed for pool %s", pool.Name) + + setMCPBuildConditions(pool, []mcfgv1.MachineConfigPoolCondition{ + { + Type: mcfgv1.MachineConfigPoolBuildFailed, + Reason: "BuildFailed", + Status: corev1.ConditionTrue, + }, + { + Type: mcfgv1.MachineConfigPoolBuildSuccess, + Status: corev1.ConditionFalse, + }, + { + Type: mcfgv1.MachineConfigPoolBuilding, + Status: corev1.ConditionFalse, + }, + { + Type: mcfgv1.MachineConfigPoolBuildPending, + Status: corev1.ConditionFalse, + }, + { + Type: mcfgv1.MachineConfigPoolDegraded, + Status: corev1.ConditionTrue, + }, + }) + + return ctrl.syncFailingStatus(pool, fmt.Errorf("build failed")) +} + +// Marks a given MachineConfigPool as the build is in progress. +func (ctrl *Controller) markBuildInProgress(pool *mcfgv1.MachineConfigPool) error { + klog.Infof("Build in progress for MachineConfigPool %s, config %s", pool.Name, pool.Spec.Configuration.Name) + + setMCPBuildConditions(pool, []mcfgv1.MachineConfigPoolCondition{ + { + Type: mcfgv1.MachineConfigPoolBuildFailed, + Status: corev1.ConditionFalse, + }, + { + Type: mcfgv1.MachineConfigPoolBuildSuccess, + Status: corev1.ConditionFalse, + }, + { + Type: mcfgv1.MachineConfigPoolBuilding, + Reason: "BuildRunning", + Status: corev1.ConditionTrue, + }, + { + Type: mcfgv1.MachineConfigPoolBuildPending, + Status: corev1.ConditionFalse, + }, + }) + + return ctrl.syncAvailableStatus(pool) +} + +// Deletes the ephemeral objects we created to perform this specific build. +func (ctrl *Controller) postBuildCleanup(pool *mcfgv1.MachineConfigPool, ignoreMissing bool) error { + // Delete the actual build object itself. 
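+ // This is either an OpenShift Build or a custom build pod, depending on the ImageBuilder in use.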
+ deleteBuildObject := func() error { + err := ctrl.imageBuilder.DeleteBuildObject(pool) + + if err == nil { + klog.Infof("Deleted build object %s", newImageBuildRequest(pool).getBuildName()) + } + + return err + } + + // Delete the ConfigMap containing the MachineConfig. + deleteMCConfigMap := func() error { + ibr := newImageBuildRequest(pool) + + err := ctrl.kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).Delete(context.TODO(), ibr.getMCConfigMapName(), metav1.DeleteOptions{}) + + if err == nil { + klog.Infof("Deleted MachineConfig ConfigMap %s for build %s", ibr.getMCConfigMapName(), ibr.getBuildName()) + } + + return err + } + + // Delete the ConfigMap containing the Dockerfile. + deleteDockerfileConfigMap := func() error { + ibr := newImageBuildRequest(pool) + + err := ctrl.kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).Delete(context.TODO(), ibr.getDockerfileConfigMapName(), metav1.DeleteOptions{}) + + if err == nil { + klog.Infof("Deleted Dockerfile ConfigMap %s for build %s", ibr.getDockerfileConfigMapName(), ibr.getBuildName()) + } + + return err + } + + maybeIgnoreMissing := func(f func() error) func() error { + return func() error { + if ignoreMissing { + return ignoreIsNotFoundErr(f()) + } + + return f() + } + } + + // If *any* of these we fail, we want to emit an error. If *all* fail, we + // want all of the error messages. + return aggerrors.AggregateGoroutines( + maybeIgnoreMissing(deleteBuildObject), + maybeIgnoreMissing(deleteMCConfigMap), + maybeIgnoreMissing(deleteDockerfileConfigMap), + ) +} + +// Marks a given MachineConfigPool as build successful and cleans up after itself. +func (ctrl *Controller) markBuildSucceeded(pool *mcfgv1.MachineConfigPool) error { + klog.Infof("Build succeeded for MachineConfigPool %s, config %s", pool.Name, pool.Spec.Configuration.Name) + + // Get the final image pullspec. + imagePullspec, err := ctrl.imageBuilder.FinalPullspec(pool) + if err != nil { + return fmt.Errorf("could not get final image pullspec for pool %s: %w", pool.Name, err) + } + + if imagePullspec == "" { + return fmt.Errorf("image pullspec empty for pool %s", pool.Name) + } + + // Perform the post-build cleanup. + if err := ctrl.postBuildCleanup(pool, false); err != nil { + return fmt.Errorf("could not do post-build cleanup: %w", err) + } + + // Set the annotation or field to point to the newly-built container image. + klog.V(4).Infof("Setting new image pullspec for %s to %s", pool.Name, imagePullspec) + if pool.Annotations == nil { + pool.Annotations = map[string]string{} + } + pool.Annotations[ctrlcommon.ExperimentalNewestLayeredImageEquivalentConfigAnnotationKey] = imagePullspec + + // Remove the build object reference from the MachineConfigPool since we're + // not using it anymore. + deleteBuildRefFromMachineConfigPool(pool) + + // Adjust the MachineConfigPool status to indicate success. + setMCPBuildConditions(pool, []mcfgv1.MachineConfigPoolCondition{ + { + Type: mcfgv1.MachineConfigPoolBuildFailed, + Status: corev1.ConditionFalse, + }, + { + Type: mcfgv1.MachineConfigPoolBuildSuccess, + Reason: "BuildSucceeded", + Status: corev1.ConditionTrue, + }, + { + Type: mcfgv1.MachineConfigPoolBuilding, + Status: corev1.ConditionFalse, + }, + { + Type: mcfgv1.MachineConfigPoolDegraded, + Status: corev1.ConditionFalse, + }, + }) + + // Perform the MachineConfigPool update. + return ctrl.updatePoolAndSyncStatus(pool, ctrl.syncAvailableStatus) +} + +// Marks a given MachineConfigPool as build pending. 
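+// The supplied ObjectReference (the Build or build pod) is also recorded on the
+// pool so that the in-progress build can be found and cleaned up later.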
+func (ctrl *Controller) markBuildPendingWithObjectRef(pool *mcfgv1.MachineConfigPool, objRef corev1.ObjectReference) error { + klog.Infof("Build for %s marked pending with object reference %v", pool.Name, objRef) + + setMCPBuildConditions(pool, []mcfgv1.MachineConfigPoolCondition{ + { + Type: mcfgv1.MachineConfigPoolBuildFailed, + Status: corev1.ConditionFalse, + }, + { + Type: mcfgv1.MachineConfigPoolBuildSuccess, + Status: corev1.ConditionFalse, + }, + { + Type: mcfgv1.MachineConfigPoolBuilding, + Status: corev1.ConditionFalse, + }, + { + Type: mcfgv1.MachineConfigPoolBuildPending, + Reason: "BuildPending", + Status: corev1.ConditionTrue, + }, + }) + + // If the MachineConfigPool has the build object reference, we just want to + // update the MachineConfigPool's status. + if machineConfigPoolHasObjectRef(pool, objRef) { + return ctrl.syncAvailableStatus(pool) + } + + // If we added the build object reference, we need to update both the + // MachineConfigPool itself and its status. + addObjectRefIfMissing(pool, objRef) + return ctrl.updatePoolAndSyncStatus(pool, ctrl.syncAvailableStatus) +} + +func (ctrl *Controller) markBuildPending(pool *mcfgv1.MachineConfigPool) error { + klog.Infof("Build for %s marked pending", pool.Name) + + setMCPBuildConditions(pool, []mcfgv1.MachineConfigPoolCondition{ + { + Type: mcfgv1.MachineConfigPoolBuildFailed, + Status: corev1.ConditionFalse, + }, + { + Type: mcfgv1.MachineConfigPoolBuildSuccess, + Status: corev1.ConditionFalse, + }, + { + Type: mcfgv1.MachineConfigPoolBuilding, + Status: corev1.ConditionFalse, + }, + { + Type: mcfgv1.MachineConfigPoolBuildPending, + Reason: "BuildPending", + Status: corev1.ConditionTrue, + }, + }) + + return ctrl.syncAvailableStatus(pool) +} + +func (ctrl *Controller) updatePoolAndSyncStatus(pool *mcfgv1.MachineConfigPool, statusFunc func(*mcfgv1.MachineConfigPool) error) error { + // We need to do an API server round-trip to ensure all of our mutations get + // propagated. + updatedPool, err := ctrl.mcfgclient.MachineconfigurationV1().MachineConfigPools().Update(context.TODO(), pool, metav1.UpdateOptions{}) + if err != nil { + return fmt.Errorf("could not update MachineConfigPool %q: %w", pool.Name, err) + } + + updatedPool.Status = pool.Status + + return statusFunc(updatedPool) +} + +// Machine Config Pools + +func (ctrl *Controller) addMachineConfigPool(obj interface{}) { + pool := obj.(*mcfgv1.MachineConfigPool).DeepCopy() + klog.V(4).Infof("Adding MachineConfigPool %s", pool.Name) + ctrl.enqueueMachineConfigPool(pool) +} + +// Prepares all of the objects needed to perform an image build. 
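+// Concretely, it stores the rendered MachineConfig and the templated Dockerfile
+// in ConfigMaps that the build consumes as its build context.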
+func (ctrl *Controller) prepareMachineConfigForPool(ibr ImageBuildRequest) error { + mc, err := ctrl.mcfgclient.MachineconfigurationV1().MachineConfigs().Get(context.TODO(), ibr.Pool.Spec.Configuration.Name, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("could not get MachineConfig %s: %w", ibr.Pool.Spec.Configuration.Name, err) + } + + mcConfigMap, err := ibr.toConfigMap(mc) + if err != nil { + return fmt.Errorf("could not convert MachineConfig %s into ConfigMap: %w", mc.Name, err) + } + + _, err = ctrl.kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).Create(context.TODO(), mcConfigMap, metav1.CreateOptions{}) + if err != nil { + return fmt.Errorf("could not load rendered MachineConfig %s into configmap: %w", mcConfigMap.Name, err) + } + + klog.Infof("Stored MachineConfig %s in ConfigMap %s for build", mc.Name, mcConfigMap.Name) + + dockerfileConfigMap, err := ibr.dockerfileToConfigMap() + if err != nil { + return fmt.Errorf("could not generate Dockerfile ConfigMap: %w", err) + } + + _, err = ctrl.kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).Create(context.TODO(), dockerfileConfigMap, metav1.CreateOptions{}) + if err != nil { + return fmt.Errorf("could not load rendered Dockerfile %s into configmap: %w", dockerfileConfigMap.Name, err) + } + + klog.Infof("Stored Dockerfile for build %s in ConfigMap %s for build", ibr.getBuildName(), dockerfileConfigMap.Name) + + return nil +} + +// Determines if we should run a build, then starts a build pod to perform the +// build, and updates the MachineConfigPool with an object reference for the +// build pod. +func (ctrl *Controller) startBuildForMachineConfigPool(pool *mcfgv1.MachineConfigPool) error { + osImageURLConfigMap, err := ctrl.kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).Get(context.TODO(), machineConfigOSImageURLConfigMapName, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("could not get OS image URL: %w", err) + } + + onClusterBuildConfigMap, err := ctrl.getOnClusterBuildConfig(pool) + if err != nil { + return fmt.Errorf("could not get configmap %q: %w", onClusterBuildConfigMapName, err) + } + + ibr := newImageBuildRequestWithConfigMap(pool, osImageURLConfigMap, onClusterBuildConfigMap) + + return ctrl.handleImageBuildRequest(ibr) +} + +// Gets the ConfigMap which specifies the name of the base image pull secret, final image pull secret, and final image pullspec. 
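+// It also validates (and, if needed, canonicalizes) the referenced pull secrets
+// and retags the final image pullspec with the rendered MachineConfig name.
+// Illustrative example of the expected ConfigMap data (secret names are arbitrary):
+//
+//   baseImagePullSecretName: base-image-pull-secret
+//   finalImagePushSecretName: final-image-push-secret
+//   finalImagePullspec: registry.hostname.com/org/repo:latest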
+func (ctrl *Controller) getOnClusterBuildConfig(pool *mcfgv1.MachineConfigPool) (*corev1.ConfigMap, error) { + onClusterBuildConfigMap, err := ctrl.kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).Get(context.TODO(), onClusterBuildConfigMapName, metav1.GetOptions{}) + if err != nil { + return nil, fmt.Errorf("could not get build controller config %q: %w", onClusterBuildConfigMapName, err) + } + + requiredKeys := []string{ + baseImagePullSecretNameConfigKey, + finalImagePushSecretNameConfigKey, + finalImagePullspecConfigKey, + } + + needToUpdateConfigMap := false + finalImagePullspecWithTag := "" + + for _, key := range requiredKeys { + val, ok := onClusterBuildConfigMap.Data[key] + if !ok { + return nil, fmt.Errorf("missing required key %q in configmap %s", key, onClusterBuildConfigMapName) + } + + if key == baseImagePullSecretNameConfigKey || key == finalImagePushSecretNameConfigKey { + secret, err := ctrl.validatePullSecret(val) + if err != nil { + return nil, err + } + + if strings.Contains(secret.Name, "canonical") { + klog.Infof("Updating build controller config %s to indicate we have a canonicalized secret %s", onClusterBuildConfigMapName, secret.Name) + onClusterBuildConfigMap.Data[key] = secret.Name + needToUpdateConfigMap = true + } + } + + if key == finalImagePullspecConfigKey { + // Replace the user-supplied tag (if present) with the name of the + // rendered MachineConfig for uniqueness. This will also allow us to + // eventually do a pre-build registry query to determine if we need to + // perform a build. + named, err := reference.ParseNamed(val) + if err != nil { + return nil, fmt.Errorf("could not parse %s with %q: %w", finalImagePullspecConfigKey, val, err) + } + + tagged, err := reference.WithTag(named, pool.Spec.Configuration.Name) + if err != nil { + return nil, fmt.Errorf("could not add tag %s to image pullspec %s: %w", pool.Spec.Configuration.Name, val, err) + } + + finalImagePullspecWithTag = tagged.String() + } + } + + // If we had to canonicalize a secret, that means the ConfigMap no longer + // points to the expected secret. So let's update the ConfigMap in the API + // server for the sake of consistency. + if needToUpdateConfigMap { + klog.Infof("Updating build controller config") + // TODO: Figure out why this causes failures with resourceVersions. + onClusterBuildConfigMap, err = ctrl.kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).Update(context.TODO(), onClusterBuildConfigMap, metav1.UpdateOptions{}) + if err != nil { + return nil, fmt.Errorf("could not update configmap %q: %w", onClusterBuildConfigMapName, err) + } + } + + // We don't want to write this back to the API server since it's only useful + // for this specific build. TODO: Migrate this to the ImageBuildRequest + // object so that it's generated on-demand instead. + onClusterBuildConfigMap.Data[finalImagePullspecConfigKey] = finalImagePullspecWithTag + + return onClusterBuildConfigMap, err +} + +// Ensure that the supplied pull secret exists, is in the correct format, etc. +func (ctrl *Controller) validatePullSecret(name string) (*corev1.Secret, error) { + secret, err := ctrl.kubeclient.CoreV1().Secrets(ctrlcommon.MCONamespace).Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + + oldSecretName := secret.Name + + secret, err = canonicalizePullSecret(secret) + if err != nil { + return nil, err + } + + // If a Docker pull secret lacks the top-level "auths" key, this means that + // it is a legacy-style pull secret. 
Buildah does not know how to correctly + // use one of these secrets. With that in mind, we "canonicalize" it, meaning + // we inject the existing legacy secret into a {"auths": {}} schema that + // Buildah can understand. We create a new K8s secret with this info and pass + // that secret into our image builder instead. + if strings.HasSuffix(secret.Name, canonicalSecretSuffix) { + klog.Infof("Found legacy-style secret %s, canonicalizing as %s", oldSecretName, secret.Name) + return ctrl.handleCanonicalizedPullSecret(secret) + } + + return secret, nil +} + +// Attempt to create a canonicalized pull secret. If the secret already exsits, we should update it. +func (ctrl *Controller) handleCanonicalizedPullSecret(secret *corev1.Secret) (*corev1.Secret, error) { + out, err := ctrl.kubeclient.CoreV1().Secrets(ctrlcommon.MCONamespace).Get(context.TODO(), secret.Name, metav1.GetOptions{}) + if err != nil && !k8serrors.IsNotFound(err) { + return nil, fmt.Errorf("could not get canonical secret %q: %w", secret.Name, err) + } + + // We don't have a canonical secret, so lets create one. + if k8serrors.IsNotFound(err) { + out, err = ctrl.kubeclient.CoreV1().Secrets(ctrlcommon.MCONamespace).Create(context.TODO(), secret, metav1.CreateOptions{}) + if err != nil { + return nil, fmt.Errorf("could not create canonical secret %q: %w", secret.Name, err) + } + + klog.Infof("Created canonical secret %s", secret.Name) + return out, nil + } + + // Check if the canonical secret from the API server matches the one we have. + // If they match, then we don't need to do an update. + if bytes.Equal(secret.Data[corev1.DockerConfigJsonKey], out.Data[corev1.DockerConfigJsonKey]) { + klog.Infof("Canonical secret %q up-to-date", secret.Name) + return out, nil + } + + // If we got here, it means that our secret needs to be updated. + out.Data = secret.Data + out, err = ctrl.kubeclient.CoreV1().Secrets(ctrlcommon.MCONamespace).Update(context.TODO(), out, metav1.UpdateOptions{}) + if err != nil { + return nil, fmt.Errorf("could not update canonical secret %q: %w", secret.Name, err) + } + + klog.Infof("Updated canonical secret %s", secret.Name) + + return out, nil +} + +// Starts a build for a given Image Build Request. +func (ctrl *Controller) handleImageBuildRequest(ibr ImageBuildRequest) error { + err := ctrl.prepareMachineConfigForPool(ibr) + if err != nil { + return fmt.Errorf("could not start build for MachineConfigPool %s: %w", ibr.Pool.Name, err) + } + + objRef, err := ctrl.imageBuilder.StartBuild(ibr) + + if err != nil { + return err + } + + return ctrl.markBuildPendingWithObjectRef(ibr.Pool, *objRef) +} + +// If one wants to opt out, this removes all of the statuses and object +// references from a given MachineConfigPool. 
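+// It also deletes the ephemeral build objects and the newest-image annotation.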
+func (ctrl *Controller) finalizeOptOut(pool *mcfgv1.MachineConfigPool) error { + if err := ctrl.postBuildCleanup(pool, true); err != nil { + return err + } + + deleteBuildRefFromMachineConfigPool(pool) + + delete(pool.Annotations, ctrlcommon.ExperimentalNewestLayeredImageEquivalentConfigAnnotationKey) + + conditions := []mcfgv1.MachineConfigPoolCondition{} + + for _, condition := range pool.Status.Conditions { + buildConditionFound := false + for _, buildConditionType := range getMachineConfigPoolBuildConditions() { + if condition.Type == buildConditionType { + buildConditionFound = true + break + } + } + + if !buildConditionFound { + conditions = append(conditions, condition) + } + } + + pool.Status.Conditions = conditions + return ctrl.updatePoolAndSyncStatus(pool, ctrl.syncAvailableStatus) +} + +// Fires whenever a MachineConfigPool is updated. +func (ctrl *Controller) updateMachineConfigPool(old, cur interface{}) { + oldPool := old.(*mcfgv1.MachineConfigPool).DeepCopy() + curPool := cur.(*mcfgv1.MachineConfigPool).DeepCopy() + + klog.V(4).Infof("Updating MachineConfigPool %s", oldPool.Name) + + doABuild, err := shouldWeDoABuild(ctrl.imageBuilder, oldPool, curPool) + if err != nil { + klog.Errorln(err) + ctrl.handleErr(err, curPool.Name) + return + } + + switch { + // We've transitioned from a layered pool to a non-layered pool. + case ctrlcommon.IsLayeredPool(oldPool) && !ctrlcommon.IsLayeredPool(curPool): + klog.V(4).Infof("MachineConfigPool %s has opted out of layering", curPool.Name) + if err := ctrl.finalizeOptOut(curPool); err != nil { + klog.Errorln(err) + ctrl.handleErr(err, curPool.Name) + return + } + // We need to do a build. + case doABuild: + klog.V(4).Infof("MachineConfigPool %s has changed, requiring a build", curPool.Name) + if err := ctrl.startBuildForMachineConfigPool(curPool); err != nil { + klog.Errorln(err) + ctrl.handleErr(err, curPool.Name) + return + } + // Everything else. + default: + klog.V(4).Infof("MachineConfigPool %s up-to-date", curPool.Name) + } + + ctrl.enqueueMachineConfigPool(curPool) +} + +// Fires whenever a MachineConfigPool is deleted. TODO: Wire up checks for +// deleting any in-progress builds. +func (ctrl *Controller) deleteMachineConfigPool(obj interface{}) { + pool, ok := obj.(*mcfgv1.MachineConfigPool) + if !ok { + tombstone, ok := obj.(cache.DeletedFinalStateUnknown) + if !ok { + utilruntime.HandleError(fmt.Errorf("Couldn't get object from tombstone %#v", obj)) + return + } + pool, ok = tombstone.Obj.(*mcfgv1.MachineConfigPool) + if !ok { + utilruntime.HandleError(fmt.Errorf("Tombstone contained object that is not a MachineConfigPool %#v", obj)) + return + } + } + klog.V(4).Infof("Deleting MachineConfigPool %s", pool.Name) +} + +func (ctrl *Controller) syncAvailableStatus(pool *mcfgv1.MachineConfigPool) error { + // I'm not sure what the consequences are of not doing this. + //nolint:gocritic // Leaving this here for review purposes. 
+ /* + if mcfgv1.IsMachineConfigPoolConditionFalse(pool.Status.Conditions, mcfgv1.MachineConfigPoolRenderDegraded) { + return nil + } + */ + sdegraded := mcfgv1.NewMachineConfigPoolCondition(mcfgv1.MachineConfigPoolRenderDegraded, corev1.ConditionFalse, "", "") + mcfgv1.SetMachineConfigPoolCondition(&pool.Status, *sdegraded) + + if _, err := ctrl.mcfgclient.MachineconfigurationV1().MachineConfigPools().UpdateStatus(context.TODO(), pool, metav1.UpdateOptions{}); err != nil { + return err + } + + return nil +} + +func (ctrl *Controller) syncFailingStatus(pool *mcfgv1.MachineConfigPool, err error) error { + sdegraded := mcfgv1.NewMachineConfigPoolCondition(mcfgv1.MachineConfigPoolRenderDegraded, corev1.ConditionTrue, "", fmt.Sprintf("Failed to build configuration for pool %s: %v", pool.Name, err)) + mcfgv1.SetMachineConfigPoolCondition(&pool.Status, *sdegraded) + if _, updateErr := ctrl.mcfgclient.MachineconfigurationV1().MachineConfigPools().UpdateStatus(context.TODO(), pool, metav1.UpdateOptions{}); updateErr != nil { + klog.Errorf("Error updating MachineConfigPool %s: %v", pool.Name, updateErr) + } + return err +} + +// Searches a MachineConfigPoolStatusConfiguration for a given object reference. +func machineConfigPoolObjectRefSearchFunc(cfg mcfgv1.MachineConfigPoolStatusConfiguration, objRef corev1.ObjectReference) bool { + for _, src := range cfg.Source { + if src == objRef { + return true + } + } + + return false +} + +// Determines if a MachineConfigPool contains a given ObjectReference. +func machineConfigPoolHasObjectRef(pool *mcfgv1.MachineConfigPool, objRef corev1.ObjectReference) bool { + return machineConfigPoolObjectRefSearchFunc(pool.Spec.Configuration, objRef) && + machineConfigPoolObjectRefSearchFunc(pool.Status.Configuration, objRef) +} + +// Determines if a MachineConfigPool contains a reference to a Build or custom build pod. +func machineConfigPoolHasBuildRef(pool *mcfgv1.MachineConfigPool) bool { + buildName := newImageBuildRequest(pool).getBuildName() + + searchFunc := func(cfg mcfgv1.MachineConfigPoolStatusConfiguration) bool { + for _, src := range cfg.Source { + if src.Name == buildName && src.Kind != "MachineConfig" { + return true + } + } + + return false + } + + return searchFunc(pool.Spec.Configuration) && searchFunc(pool.Status.Configuration) +} + +// Deletes the build pod references from the MachineConfigPool. +func deleteBuildRefFromMachineConfigPool(pool *mcfgv1.MachineConfigPool) { + buildPodName := newImageBuildRequest(pool).getBuildName() + + deleteFunc := func(cfg mcfgv1.MachineConfigPoolStatusConfiguration) []corev1.ObjectReference { + configSources := []corev1.ObjectReference{} + + for _, src := range cfg.Source { + if src.Name != buildPodName { + configSources = append(configSources, src) + } + } + + return configSources + } + + pool.Spec.Configuration.Source = deleteFunc(pool.Spec.Configuration) + pool.Status.Configuration.Source = deleteFunc(pool.Status.Configuration) +} + +// Determines if two conditions are equal. Note: I purposely do not include the +// timestamp in the equality test, since we do not directly set it. +func isConditionEqual(cond1, cond2 mcfgv1.MachineConfigPoolCondition) bool { + return cond1.Type == cond2.Type && + cond1.Status == cond2.Status && + cond1.Message == cond2.Message && + cond1.Reason == cond2.Reason +} + +// Idempotently adds an ObjectRefence to a pool. 
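+// The reference is appended to both Spec.Configuration.Source and Status.Configuration.Source.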
+func addObjectRefIfMissing(pool *mcfgv1.MachineConfigPool, objRef corev1.ObjectReference) { + if !machineConfigPoolHasObjectRef(pool, objRef) { + pool.Spec.Configuration.Source = append(pool.Spec.Configuration.Source, objRef) + pool.Status.Configuration.Source = append(pool.Status.Configuration.Source, objRef) + } +} + +// Idempotently sets MCP build conditions on a given MachineConfigPool. +func setMCPBuildConditions(pool *mcfgv1.MachineConfigPool, conditions []mcfgv1.MachineConfigPoolCondition) { + for _, condition := range conditions { + condition := condition + currentCondition := mcfgv1.GetMachineConfigPoolCondition(pool.Status, condition.Type) + if currentCondition != nil && isConditionEqual(*currentCondition, condition) { + continue + } + + mcpCondition := mcfgv1.NewMachineConfigPoolCondition(condition.Type, condition.Status, condition.Reason, condition.Message) + mcfgv1.SetMachineConfigPoolCondition(&pool.Status, *mcpCondition) + } +} + +// Determine if we have a config change. +func isPoolConfigChange(oldPool, curPool *mcfgv1.MachineConfigPool) bool { + return oldPool.Spec.Configuration.Name != curPool.Spec.Configuration.Name +} + +// Determine if we have an image pullspec label. +func hasImagePullspecAnnotation(pool *mcfgv1.MachineConfigPool) bool { + imagePullspecAnnotation, ok := pool.Annotations[ctrlcommon.ExperimentalNewestLayeredImageEquivalentConfigAnnotationKey] + return imagePullspecAnnotation != "" && ok +} + +// Checks our pool to see if we can do a build. We base this off of a few criteria: +// 1. Is the pool opted into layering? +// 2. Do we have an object reference to an in-progress build? +// 3. Is the pool degraded? +// 4. Is our build in a specific state? +// +// Returns true if we are able to build. +func canPoolBuild(pool *mcfgv1.MachineConfigPool) bool { + // If we don't have a layered pool, we should not build. + if !ctrlcommon.IsLayeredPool(pool) { + return false + } + + // If we have a reference to an in-progress build, we should not build. + if machineConfigPoolHasBuildRef(pool) { + return false + } + + // If the pool is degraded, we should not build. + if isPoolDegraded(pool) { + return false + } + + // If the pool is in any of these states, we should not build. + conditionTypes := []mcfgv1.MachineConfigPoolConditionType{ + mcfgv1.MachineConfigPoolBuilding, + mcfgv1.MachineConfigPoolBuildPending, + mcfgv1.MachineConfigPoolBuildFailed, + } + + for _, conditionType := range conditionTypes { + if mcfgv1.IsMachineConfigPoolConditionTrue(pool.Status.Conditions, conditionType) { + return false + } + } + + return true +} + +// Determines if a pool is in a degraded state. Returns true if the pool is in +// any kind of degraded state. +func isPoolDegraded(pool *mcfgv1.MachineConfigPool) bool { + degradedConditionTypes := []mcfgv1.MachineConfigPoolConditionType{ + mcfgv1.MachineConfigPoolDegraded, + mcfgv1.MachineConfigPoolRenderDegraded, + mcfgv1.MachineConfigPoolNodeDegraded, + } + + for _, conditionType := range degradedConditionTypes { + if mcfgv1.IsMachineConfigPoolConditionTrue(pool.Status.Conditions, conditionType) { + return true + } + } + + return false +} + +// Determines if we should do a build based upon the state of our +// MachineConfigPool, the presence of a build pod, etc. +func shouldWeDoABuild(builder interface { + IsBuildRunning(*mcfgv1.MachineConfigPool) (bool, error) +}, oldPool, curPool *mcfgv1.MachineConfigPool) (bool, error) { + // If we don't have a layered pool, we should not build. 
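+ // canPoolBuild already covers the layering, degradation, and build-condition checks.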
+ poolStateSuggestsBuild := canPoolBuild(curPool) && + // If we have a config change or we're missing an image pullspec label, we + // should do a build. + (isPoolConfigChange(oldPool, curPool) || !hasImagePullspecAnnotation(curPool)) && + // If we're missing a build pod reference, it likely means we don't need to + // do a build. + !machineConfigPoolHasBuildRef(curPool) + + if !poolStateSuggestsBuild { + return false, nil + } + + // If a build is found running, we should not do a build. + isRunning, err := builder.IsBuildRunning(curPool) + + return !isRunning, err +} + +// Enumerates all of the build-related MachineConfigPool condition types. +func getMachineConfigPoolBuildConditions() []mcfgv1.MachineConfigPoolConditionType { + return []mcfgv1.MachineConfigPoolConditionType{ + mcfgv1.MachineConfigPoolBuildFailed, + mcfgv1.MachineConfigPoolBuildPending, + mcfgv1.MachineConfigPoolBuildSuccess, + mcfgv1.MachineConfigPoolBuilding, + } +} + +// Determines if a pod or build is managed by this controller by examining its labels. +func hasAllRequiredOSBuildLabels(labels map[string]string) bool { + requiredLabels := []string{ + ctrlcommon.OSImageBuildPodLabel, + targetMachineConfigPoolLabel, + desiredConfigLabel, + } + + for _, label := range requiredLabels { + if _, ok := labels[label]; !ok { + return false + } + } + + return true +} diff --git a/pkg/controller/build/build_controller_test.go b/pkg/controller/build/build_controller_test.go new file mode 100644 index 0000000000..d5bd54d981 --- /dev/null +++ b/pkg/controller/build/build_controller_test.go @@ -0,0 +1,668 @@ +package build + +import ( + "context" + "fmt" + "os" + "time" + + ign3types "github.com/coreos/ignition/v2/config/v3_2/types" + buildv1 "github.com/openshift/api/build/v1" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + fakeclientbuildv1 "github.com/openshift/client-go/build/clientset/versioned/fake" + mcfgv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" + fakeclientmachineconfigv1 "github.com/openshift/machine-config-operator/pkg/generated/clientset/versioned/fake" + testhelpers "github.com/openshift/machine-config-operator/test/helpers" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + fakecorev1client "k8s.io/client-go/kubernetes/fake" + "k8s.io/klog/v2" + + corev1 "k8s.io/api/core/v1" + + ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + + "testing" +) + +const ( + expectedImageSHA string = "sha256:628e4e8f0a78d91015c6cebeee95931ae2e8defe5dfb4ced4a82830e08937573" + expectedImagePullspecWithTag string = "registry.hostname.com/org/repo:latest" + expectedImagePullspecWithSHA string = "registry.hostname.com/org/repo@" + expectedImageSHA +) + +type optInFunc func(context.Context, *testing.T, *Clients, string) + +func TestMain(m *testing.M) { + klog.InitFlags(nil) + os.Exit(m.Run()) +} + +func TestBuildControllerNoPoolsOptedIn(t *testing.T) { + t.Parallel() + + fixture := newBuildControllerTestFixture(t) + fixture.runTestFuncs(t, testFuncs{ + imageBuilder: testNoMCPsOptedIn, + customPodBuilder: testNoMCPsOptedIn, + }) +} + +func TestBuildControllerSingleOptedInPool(t *testing.T) { + pool := "worker" + + t.Parallel() + + t.Run("Happy Path", func(t *testing.T) { + t.Parallel() + + newBuildControllerTestFixture(t).runTestFuncs(t, testFuncs{ + imageBuilder: func(ctx context.Context, t *testing.T, cs *Clients) { + testOptInMCPImageBuilder(ctx, t, cs, pool) + }, + 
+			customPodBuilder: func(ctx context.Context, t *testing.T, cs *Clients) {
+				testOptInMCPCustomBuildPod(ctx, t, cs, pool)
+			},
+		})
+	})
+
+	t.Run("Happy Path Multiple Configs", func(t *testing.T) {
+		t.Parallel()
+
+		newBuildControllerTestFixture(t).runTestFuncs(t, testFuncs{
+			imageBuilder: func(ctx context.Context, t *testing.T, cs *Clients) {
+				testMultipleConfigsAreRolledOut(ctx, t, cs, pool, testOptInMCPImageBuilder)
+			},
+			customPodBuilder: func(ctx context.Context, t *testing.T, cs *Clients) {
+				testMultipleConfigsAreRolledOut(ctx, t, cs, pool, testOptInMCPCustomBuildPod)
+			},
+		})
+	})
+
+	t.Run("Build Failure", func(t *testing.T) {
+		t.Parallel()
+
+		newBuildControllerTestFixture(t).runTestFuncs(t, testFuncs{
+			imageBuilder: func(ctx context.Context, t *testing.T, cs *Clients) {
+				mcp := optInMCP(ctx, t, cs, pool)
+				assertMCPFollowsImageBuildStatus(ctx, t, cs, mcp, buildv1.BuildPhaseFailed)
+				assertMachineConfigPoolReachesState(ctx, t, cs, pool, isMCPBuildFailure)
+			},
+			customPodBuilder: func(ctx context.Context, t *testing.T, cs *Clients) {
+				mcp := optInMCP(ctx, t, cs, pool)
+				assertMCPFollowsBuildPodStatus(ctx, t, cs, mcp, corev1.PodFailed)
+				assertMachineConfigPoolReachesState(ctx, t, cs, pool, isMCPBuildFailure)
+			},
+		})
+	})
+
+	t.Run("Degraded Pool", func(t *testing.T) {
+		t.Parallel()
+
+		newBuildControllerTestFixture(t).runTestFuncs(t, testFuncs{
+			imageBuilder:     testMCPIsDegraded,
+			customPodBuilder: testMCPIsDegraded,
+		})
+	})
+
+	t.Run("Opted-in pool opts out", func(t *testing.T) {
+		t.Parallel()
+
+		newBuildControllerTestFixture(t).runTestFuncs(t, testFuncs{
+			imageBuilder: func(ctx context.Context, t *testing.T, cs *Clients) {
+				testOptedInMCPOptsOut(ctx, t, cs, testOptInMCPImageBuilder)
+			},
+			customPodBuilder: func(ctx context.Context, t *testing.T, cs *Clients) {
+				testOptedInMCPOptsOut(ctx, t, cs, testOptInMCPCustomBuildPod)
+			},
+		})
+	})
+
+	t.Run("Built pool gets unrelated update", func(t *testing.T) {
+		t.Parallel()
+
+		newBuildControllerTestFixture(t).runTestFuncs(t, testFuncs{
+			imageBuilder: func(ctx context.Context, t *testing.T, cs *Clients) {
+				testBuiltPoolGetsUnrelatedUpdate(ctx, t, cs, testOptInMCPImageBuilder)
+			},
+			customPodBuilder: func(ctx context.Context, t *testing.T, cs *Clients) {
+				testBuiltPoolGetsUnrelatedUpdate(ctx, t, cs, testOptInMCPCustomBuildPod)
+			},
+		})
+	})
+}
+
+func TestBuildControllerMultipleOptedInPools(t *testing.T) {
+	t.Parallel()
+
+	pools := []string{"master", "worker"}
+
+	// Tests that a single config is rolled out to the target MachineConfigPools.
+	t.Run("Happy Path", func(t *testing.T) {
+		t.Parallel()
+
+		fixture := newBuildControllerTestFixture(t)
+		for _, pool := range pools {
+			pool := pool
+			t.Run(pool, func(t *testing.T) {
+				t.Parallel()
+				fixture.runTestFuncs(t, testFuncs{
+					imageBuilder: func(ctx context.Context, t *testing.T, cs *Clients) {
+						t.Logf("Running in pool %s", pool)
+						testOptInMCPImageBuilder(ctx, t, cs, pool)
+					},
+					customPodBuilder: func(ctx context.Context, t *testing.T, cs *Clients) {
+						t.Logf("Running in pool %s", pool)
+						testOptInMCPCustomBuildPod(ctx, t, cs, pool)
+					},
+				})
+			})
+		}
+	})
+
+	// Tests that multiple configs are serially rolled out to the target
+	// MachineConfigPool and ensures that each config is rolled out before moving
+	// onto the next.
+ t.Run("Happy Path Multiple Configs", func(t *testing.T) { + t.Parallel() + + fixture := newBuildControllerTestFixture(t) + + for _, pool := range pools { + pool := pool + t.Run(pool, func(t *testing.T) { + t.Parallel() + + fixture.runTestFuncs(t, testFuncs{ + imageBuilder: func(ctx context.Context, t *testing.T, cs *Clients) { + testMultipleConfigsAreRolledOut(ctx, t, cs, pool, testOptInMCPImageBuilder) + }, + customPodBuilder: func(ctx context.Context, t *testing.T, cs *Clients) { + testMultipleConfigsAreRolledOut(ctx, t, cs, pool, testOptInMCPCustomBuildPod) + }, + }) + }) + } + }) + + // Tests that a build failure degrades the MachineConfigPool + t.Run("Build Failure", func(t *testing.T) { + t.Parallel() + + fixture := newBuildControllerTestFixture(t) + + for _, pool := range pools { + pool := pool + t.Run(pool, func(t *testing.T) { + t.Parallel() + + fixture.runTestFuncs(t, testFuncs{ + imageBuilder: func(ctx context.Context, t *testing.T, cs *Clients) { + mcp := optInMCP(ctx, t, cs, pool) + assertMCPFollowsImageBuildStatus(ctx, t, cs, mcp, buildv1.BuildPhaseFailed) + assertMachineConfigPoolReachesState(ctx, t, cs, pool, isMCPBuildFailure) + }, + customPodBuilder: func(ctx context.Context, t *testing.T, cs *Clients) { + mcp := optInMCP(ctx, t, cs, pool) + assertMCPFollowsBuildPodStatus(ctx, t, cs, mcp, corev1.PodFailed) + assertMachineConfigPoolReachesState(ctx, t, cs, pool, isMCPBuildFailure) + }, + }) + }) + } + }) +} + +// Holds a name and function to implement a given BuildController test. +type buildControllerTestFixture struct { + ctx context.Context + t *testing.T + imageBuilderClient *Clients + customPodBuilderClient *Clients +} + +type testFuncs struct { + imageBuilder func(context.Context, *testing.T, *Clients) + customPodBuilder func(context.Context, *testing.T, *Clients) +} + +func newBuildControllerTestFixtureWithContext(ctx context.Context, t *testing.T) *buildControllerTestFixture { + b := &buildControllerTestFixture{ + ctx: ctx, + t: t, + } + + b.imageBuilderClient = b.startBuildControllerWithImageBuilder() + b.customPodBuilderClient = b.startBuildControllerWithCustomPodBuilder() + + return b +} + +func newBuildControllerTestFixture(t *testing.T) *buildControllerTestFixture { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) + t.Cleanup(cancel) + + return newBuildControllerTestFixtureWithContext(ctx, t) +} + +func (b *buildControllerTestFixture) runTestFuncs(t *testing.T, tf testFuncs) { + t.Run("CustomBuildPod", func(t *testing.T) { + t.Parallel() + // t.Cleanup(func() { + // dumpObjects(b.ctx, t, b.customPodBuilderClient, t.Name()) + // }) + tf.customPodBuilder(b.ctx, t, b.customPodBuilderClient) + }) + + t.Run("ImageBuilder", func(t *testing.T) { + t.Parallel() + // t.Cleanup(func() { + // dumpObjects(b.ctx, t, b.imageBuilderClient, t.Name()) + // }) + + tf.imageBuilder(b.ctx, t, b.imageBuilderClient) + }) +} + +func (b *buildControllerTestFixture) setupClients() *Clients { + objects := newMachineConfigPoolAndConfigs("master", "rendered-master-1") + objects = append(objects, newMachineConfigPoolAndConfigs("worker", "rendered-worker-1")...) 
+ objects = append(objects, &mcfgv1.ControllerConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machine-config-controller", + }, + }) + + onClusterBuildConfigMap := getOnClusterBuildConfigMap() + + legacyPullSecret := `{"registry.hostname.com": {"username": "user", "password": "s3kr1t", "auth": "s00pers3kr1t", "email": "user@hostname.com"}}` + + pullSecret := `{"auths":{"registry.hostname.com": {"username": "user", "password": "s3kr1t", "auth": "s00pers3kr1t", "email": "user@hostname.com"}}}` + + return &Clients{ + mcfgclient: fakeclientmachineconfigv1.NewSimpleClientset(objects...), + kubeclient: fakecorev1client.NewSimpleClientset( + getOSImageURLConfigMap(), + onClusterBuildConfigMap, + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: onClusterBuildConfigMap.Data["finalImagePushSecretName"], + Namespace: ctrlcommon.MCONamespace, + }, + Data: map[string][]byte{ + corev1.DockerConfigKey: []byte(legacyPullSecret), + }, + Type: corev1.SecretTypeDockercfg, + }, + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: onClusterBuildConfigMap.Data["baseImagePullSecretName"], + Namespace: ctrlcommon.MCONamespace, + }, + Data: map[string][]byte{ + corev1.DockerConfigJsonKey: []byte(pullSecret), + }, + Type: corev1.SecretTypeDockerConfigJson, + }, + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "etc-pki-entitlement", + Namespace: "openshift-config-managed", + }, + Data: map[string][]byte{ + "entitlement-key.pem": []byte("abc"), + "entitlement.pem": []byte("123"), + }, + }, + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machine-config-operator", + Namespace: ctrlcommon.MCONamespace, + }, + }, + ), + buildclient: fakeclientbuildv1.NewSimpleClientset(), + } +} + +func (b *buildControllerTestFixture) getConfig() BuildControllerConfig { + return BuildControllerConfig{ + MaxRetries: 5, + UpdateDelay: time.Millisecond, + } +} + +// Instantiates all of the initial objects and starts the BuildController. +func (b *buildControllerTestFixture) startBuildControllerWithImageBuilder() *Clients { + clients := b.setupClients() + + ctrl := NewWithImageBuilder(b.getConfig(), clients) + + go ctrl.Run(b.ctx, 5) + + return clients +} + +func (b *buildControllerTestFixture) startBuildControllerWithCustomPodBuilder() *Clients { + clients := b.setupClients() + + ctrl := NewWithCustomPodBuilder(b.getConfig(), clients) + + go ctrl.Run(b.ctx, 5) + + return clients +} + +// Helper that determines if the build is a success. +func isMCPBuildSuccess(mcp *mcfgv1.MachineConfigPool) bool { + imagePullspec, hasConfigAnnotation := mcp.Annotations[ctrlcommon.ExperimentalNewestLayeredImageEquivalentConfigAnnotationKey] + + return hasConfigAnnotation && + ctrlcommon.IsLayeredPool(mcp) && + (imagePullspec == expectedImagePullspecWithSHA || imagePullspec == "fake@logs") && + mcfgv1.IsMachineConfigPoolConditionTrue(mcp.Status.Conditions, mcfgv1.MachineConfigPoolBuildSuccess) && + !machineConfigPoolHasBuildRef(mcp) && machineConfigPoolHasMachineConfigRefs(mcp) +} + +func machineConfigPoolHasMachineConfigRefs(pool *mcfgv1.MachineConfigPool) bool { + expectedMCP := newMachineConfigPool(pool.Name) + + for _, ref := range expectedMCP.Spec.Configuration.Source { + if !machineConfigPoolHasObjectRef(pool, ref) { + return false + } + } + + return true +} + +// Helper that determines if the build was a failure. 
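+// A failed build leaves the pool layered and degraded, with the BuildFailed
+// condition set and the build object reference still attached so the failed
+// build can be inspected.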
+func isMCPBuildFailure(mcp *mcfgv1.MachineConfigPool) bool { + return ctrlcommon.IsLayeredPool(mcp) && + mcfgv1.IsMachineConfigPoolConditionTrue(mcp.Status.Conditions, mcfgv1.MachineConfigPoolBuildFailed) && + mcfgv1.IsMachineConfigPoolConditionTrue(mcp.Status.Conditions, mcfgv1.MachineConfigPoolDegraded) && + machineConfigPoolHasBuildRef(mcp) && machineConfigPoolHasMachineConfigRefs(mcp) +} + +// Opts a given MachineConfigPool into layering and asserts that the MachineConfigPool reaches the desired state. +func testOptInMCPCustomBuildPod(ctx context.Context, t *testing.T, cs *Clients, poolName string) { + mcp := optInMCP(ctx, t, cs, poolName) + assertMCPFollowsBuildPodStatus(ctx, t, cs, mcp, corev1.PodSucceeded) + assertMachineConfigPoolReachesState(ctx, t, cs, poolName, isMCPBuildSuccess) +} + +// Opts a given MachineConfigPool into layering and asserts that the MachineConfigPool reaches the desired state. +func testOptInMCPImageBuilder(ctx context.Context, t *testing.T, cs *Clients, poolName string) { + mcp := optInMCP(ctx, t, cs, poolName) + assertMCPFollowsImageBuildStatus(ctx, t, cs, mcp, buildv1.BuildPhaseComplete) + assertMachineConfigPoolReachesState(ctx, t, cs, poolName, isMCPBuildSuccess) +} + +// Mutates all MachineConfigPools that are not opted in to ensure they are ignored. +func testNoMCPsOptedIn(ctx context.Context, t *testing.T, cs *Clients) { + // Set an unrelated label to force a sync. + mcpList, err := cs.mcfgclient.MachineconfigurationV1().MachineConfigPools().List(ctx, metav1.ListOptions{}) + require.NoError(t, err) + + for _, mcp := range mcpList.Items { + mcp := mcp + mcp.Labels["a-label-key"] = "" + _, err := cs.mcfgclient.MachineconfigurationV1().MachineConfigPools().Update(ctx, &mcp, metav1.UpdateOptions{}) + require.NoError(t, err) + } + + mcpList, err = cs.mcfgclient.MachineconfigurationV1().MachineConfigPools().List(ctx, metav1.ListOptions{}) + require.NoError(t, err) + + for _, mcp := range mcpList.Items { + mcp := mcp + assert.False(t, ctrlcommon.IsLayeredPool(&mcp)) + assert.NotContains(t, mcp.Labels, ctrlcommon.ExperimentalNewestLayeredImageEquivalentConfigAnnotationKey) + } +} + +// Rolls out multiple configs to a given pool, asserting that each config is completely rolled out before moving onto the next. 
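+// Each iteration points the pool at a new rendered config named
+// "rendered-<pool>-<n>", opts the pool in, and waits for a successful build of
+// that exact config before moving on.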
+func testMultipleConfigsAreRolledOut(ctx context.Context, t *testing.T, cs *Clients, poolName string, optInFunc optInFunc) { + for i := 1; i < 10; i++ { + config := fmt.Sprintf("rendered-%s-%d", poolName, i) + + t.Run(config, func(t *testing.T) { + workerMCP, err := cs.mcfgclient.MachineconfigurationV1().MachineConfigPools().Get(ctx, poolName, metav1.GetOptions{}) + require.NoError(t, err) + + workerMCP.Spec.Configuration.Name = config + + renderedMC := testhelpers.NewMachineConfig( + config, + map[string]string{ + ctrlcommon.GeneratedByControllerVersionAnnotationKey: "version-number", + "machineconfiguration.openshift.io/role": poolName, + }, + "", + []ign3types.File{}) + + _, err = cs.mcfgclient.MachineconfigurationV1().MachineConfigs().Create(ctx, renderedMC, metav1.CreateOptions{}) + if err != nil && !k8serrors.IsAlreadyExists(err) { + require.NoError(t, err) + } + + _, err = cs.mcfgclient.MachineconfigurationV1().MachineConfigPools().Update(ctx, workerMCP, metav1.UpdateOptions{}) + require.NoError(t, err) + optInFunc(ctx, t, cs, poolName) + + var targetPool *mcfgv1.MachineConfigPool + + outcome := assertMachineConfigPoolReachesState(ctx, t, cs, poolName, func(mcp *mcfgv1.MachineConfigPool) bool { + targetPool = mcp + return mcp.Spec.Configuration.Name == config && isMCPBuildSuccess(mcp) && machineConfigPoolHasMachineConfigRefs(mcp) + }) + + if !outcome { + t.Logf("Config name, actual: %s, expected: %v", targetPool.Spec.Configuration.Name, config) + t.Logf("Is build success? %v", isMCPBuildSuccess(targetPool)) + t.Logf("Has all MachineConfig refs? %v", machineConfigPoolHasMachineConfigRefs(targetPool)) + } + + time.Sleep(time.Millisecond) + }) + } +} + +// Tests that an opted-in MachineConfigPool is able to opt back out. +func testOptedInMCPOptsOut(ctx context.Context, t *testing.T, cs *Clients, optInFunc optInFunc) { + optInFunc(ctx, t, cs, "worker") + + optOutMCP(ctx, t, cs, "worker") + + assertMachineConfigPoolReachesState(ctx, t, cs, "worker", func(mcp *mcfgv1.MachineConfigPool) bool { + layeringLabels := []string{ + ctrlcommon.LayeringEnabledPoolLabel, + } + + for _, label := range layeringLabels { + if _, ok := mcp.Labels[label]; ok { + return false + } + } + + for _, condition := range getMachineConfigPoolBuildConditions() { + if mcfgv1.IsMachineConfigPoolConditionPresentAndEqual(mcp.Status.Conditions, condition, corev1.ConditionTrue) || + mcfgv1.IsMachineConfigPoolConditionPresentAndEqual(mcp.Status.Conditions, condition, corev1.ConditionFalse) { + return false + } + } + + return !machineConfigPoolHasBuildRef(mcp) + }) +} + +// Tests that if a MachineConfigPool is degraded, that a build (object / pod) is not created. +func testMCPIsDegraded(ctx context.Context, t *testing.T, cs *Clients) { + mcp, err := cs.mcfgclient.MachineconfigurationV1().MachineConfigPools().Get(ctx, "worker", metav1.GetOptions{}) + require.NoError(t, err) + + mcp.Labels[ctrlcommon.LayeringEnabledPoolLabel] = "" + + condition := mcfgv1.NewMachineConfigPoolCondition(mcfgv1.MachineConfigPoolDegraded, corev1.ConditionTrue, "", "") + mcfgv1.SetMachineConfigPoolCondition(&mcp.Status, *condition) + + _, err = cs.mcfgclient.MachineconfigurationV1().MachineConfigPools().Update(ctx, mcp, metav1.UpdateOptions{}) + require.NoError(t, err) + + assertMachineConfigPoolReachesState(ctx, t, cs, "worker", func(mcp *mcfgv1.MachineConfigPool) bool { + // TODO: Should we fail the build without even starting it if the pool is degraded? 
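+		// Since the build should never have started, none of the build-related
+		// conditions should be set on the pool.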
+ for _, condition := range getMachineConfigPoolBuildConditions() { + if mcfgv1.IsMachineConfigPoolConditionTrue(mcp.Status.Conditions, condition) { + return false + } + } + + return mcfgv1.IsMachineConfigPoolConditionTrue(mcp.Status.Conditions, mcfgv1.MachineConfigPoolDegraded) && + assertNoBuildPods(ctx, t, cs) && + assertNoBuilds(ctx, t, cs) + }) +} + +// Tests that a label update or similar does not cause a build to occur. +func testBuiltPoolGetsUnrelatedUpdate(ctx context.Context, t *testing.T, cs *Clients, optInFunc optInFunc) { + optInFunc(ctx, t, cs, "worker") + + pool, err := cs.mcfgclient.MachineconfigurationV1().MachineConfigPools().Get(ctx, "worker", metav1.GetOptions{}) + require.NoError(t, err) + + pool.Annotations["unrelated-annotation"] = "hello" + pool.Labels["unrelated-label"] = "" + _, err = cs.mcfgclient.MachineconfigurationV1().MachineConfigPools().Update(ctx, pool, metav1.UpdateOptions{}) + require.NoError(t, err) + + assertMachineConfigPoolReachesState(ctx, t, cs, "worker", func(mcp *mcfgv1.MachineConfigPool) bool { + return assert.Equal(t, mcp.Status.Conditions, pool.Status.Conditions) && + assertNoBuildPods(ctx, t, cs) && + assertNoBuilds(ctx, t, cs) + }) +} + +// Mocks whether a given build is running. +type mockIsBuildRunning bool + +func (m *mockIsBuildRunning) IsBuildRunning(*mcfgv1.MachineConfigPool) (bool, error) { + return bool(*m), nil +} + +// Tests if we should do a build for a variety of edge-cases and circumstances. +func TestShouldWeDoABuild(t *testing.T) { + t.Parallel() + + // Mutators which mutate the given MachineConfigPool. + toLayeredPool := func(mcp *mcfgv1.MachineConfigPool) *mcfgv1.MachineConfigPool { + mcp.Labels[ctrlcommon.LayeringEnabledPoolLabel] = "" + return mcp + } + + toLayeredPoolWithImagePullspec := func(mcp *mcfgv1.MachineConfigPool) *mcfgv1.MachineConfigPool { + mcp = toLayeredPool(mcp) + mcp.Annotations = map[string]string{ + ctrlcommon.ExperimentalNewestLayeredImageEquivalentConfigAnnotationKey: "image-pullspec", + } + return mcp + } + + toLayeredPoolWithConditionsSet := func(mcp *mcfgv1.MachineConfigPool, conditions []mcfgv1.MachineConfigPoolCondition) *mcfgv1.MachineConfigPool { + mcp = toLayeredPoolWithImagePullspec(mcp) + setMCPBuildConditions(mcp, conditions) + return mcp + } + + type shouldWeBuildTestCase struct { + name string + oldPool *mcfgv1.MachineConfigPool + curPool *mcfgv1.MachineConfigPool + buildRunning bool + expected bool + } + + testCases := []shouldWeBuildTestCase{ + { + name: "Non-layered pool", + oldPool: newMachineConfigPool("worker", "rendered-worker-1"), + curPool: newMachineConfigPool("worker", "rendered-worker-1"), + expected: false, + }, + { + name: "Layered pool config change with missing image pullspec", + oldPool: toLayeredPool(newMachineConfigPool("worker", "rendered-worker-1")), + curPool: toLayeredPool(newMachineConfigPool("worker", "rendered-worker-2")), + expected: true, + }, + { + name: "Layered pool with no config change and missing image pullspec", + oldPool: toLayeredPool(newMachineConfigPool("worker", "rendered-worker-1")), + curPool: toLayeredPool(newMachineConfigPool("worker", "rendered-worker-1")), + expected: true, + }, + { + name: "Layered pool with image pullspec", + oldPool: toLayeredPoolWithImagePullspec(newMachineConfigPool("worker", "rendered-worker-1")), + curPool: toLayeredPoolWithImagePullspec(newMachineConfigPool("worker", "rendered-worker-1")), + }, + { + name: "Layered pool with build pod", + oldPool: toLayeredPoolWithImagePullspec(newMachineConfigPool("worker", 
"rendered-worker-1")), + curPool: toLayeredPoolWithImagePullspec(newMachineConfigPool("worker", "rendered-worker-1")), + buildRunning: true, + expected: false, + }, + { + name: "Layered pool with prior successful build and config change", + oldPool: toLayeredPoolWithConditionsSet(newMachineConfigPool("worker", "rendered-worker-1"), []mcfgv1.MachineConfigPoolCondition{ + { + Type: mcfgv1.MachineConfigPoolBuildSuccess, + Status: corev1.ConditionTrue, + }, + }), + curPool: toLayeredPoolWithImagePullspec(newMachineConfigPool("worker", "rendered-worker-2")), + expected: true, + }, + } + + // Generate additional test cases programmatically. + buildStates := map[mcfgv1.MachineConfigPoolConditionType]string{ + mcfgv1.MachineConfigPoolBuildFailed: "failed", + mcfgv1.MachineConfigPoolBuildPending: "pending", + mcfgv1.MachineConfigPoolBuilding: "in progress", + mcfgv1.MachineConfigPoolDegraded: "degraded", + mcfgv1.MachineConfigPoolNodeDegraded: "node degraded", + mcfgv1.MachineConfigPoolRenderDegraded: "render degraded", + } + + for conditionType, name := range buildStates { + conditions := []mcfgv1.MachineConfigPoolCondition{ + { + Type: conditionType, + Status: corev1.ConditionTrue, + }, + } + + testCases = append(testCases, shouldWeBuildTestCase{ + name: fmt.Sprintf("Layered pool with %s build", name), + oldPool: toLayeredPoolWithConditionsSet(newMachineConfigPool("worker", "rendered-worker-1"), conditions), + curPool: toLayeredPoolWithConditionsSet(newMachineConfigPool("worker", "rendered-worker-1"), conditions), + expected: false, + }) + } + + for _, testCase := range testCases { + testCase := testCase + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + + mb := mockIsBuildRunning(testCase.buildRunning) + + doABuild, err := shouldWeDoABuild(&mb, testCase.oldPool, testCase.curPool) + assert.NoError(t, err) + assert.Equal(t, testCase.expected, doABuild) + }) + } +} diff --git a/pkg/controller/build/fixtures_test.go b/pkg/controller/build/fixtures_test.go new file mode 100644 index 0000000000..eec68a7747 --- /dev/null +++ b/pkg/controller/build/fixtures_test.go @@ -0,0 +1,582 @@ +package build + +import ( + "context" + "fmt" + "io/ioutil" + "strings" + "testing" + "time" + + ign3types "github.com/coreos/ignition/v2/config/v3_2/types" + "github.com/davecgh/go-spew/spew" + "github.com/ghodss/yaml" + buildv1 "github.com/openshift/api/build/v1" + mcfgv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" + ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" + testhelpers "github.com/openshift/machine-config-operator/test/helpers" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/wait" +) + +// Gets an example machine-config-osimageurl ConfigMap. 
+func getOSImageURLConfigMap() *corev1.ConfigMap { + return &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: machineConfigOSImageURLConfigMapName, + Namespace: ctrlcommon.MCONamespace, + }, + Data: map[string]string{ + baseOSContainerImageConfigKey: "registry.ci.openshift.org/ocp/4.14-2023-05-29-125629@sha256:12e89d631c0ca1700262583acfb856b6e7dbe94800cb38035d68ee5cc912411c", + baseOSExtensionsContainerImageConfigKey: "registry.ci.openshift.org/ocp/4.14-2023-05-29-125629@sha256:5b6d901069e640fc53d2e971fa1f4802bf9dea1a4ffba67b8a17eaa7d8dfa336", + osImageURLConfigKey: "registry.ci.openshift.org/ocp/4.14-2023-05-29-125629@sha256:4f7792412d1559bf0a996edeff5e836e210f6d77df94b552a3866144d043bce1", + releaseVersionConfigKey: "4.14.0-0.ci-2023-05-29-125629", + }, + } +} + +// Gets an example on-cluster-build-config ConfigMap. +func getOnClusterBuildConfigMap() *corev1.ConfigMap { + return &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: onClusterBuildConfigMapName, + Namespace: ctrlcommon.MCONamespace, + }, + Data: map[string]string{ + baseImagePullSecretNameConfigKey: "base-image-pull-secret", + finalImagePushSecretNameConfigKey: "final-image-push-secret", + finalImagePullspecConfigKey: expectedImagePullspecWithTag, + }, + } +} + +// Creates a new MachineConfigPool and the corresponding MachineConfigs. +func newMachineConfigPoolAndConfigs(name string, params ...string) []runtime.Object { + mcp := newMachineConfigPool(name, params...) + + out := []runtime.Object{mcp} + + files := []ign3types.File{} + + // Create individual MachineConfigs to accompany the child MachineConfigs referred to by our MachineConfigPool. + for _, childConfig := range mcp.Spec.Configuration.Source { + if childConfig.Kind != "MachineConfig" { + continue + } + + filename := fmt.Sprintf("/etc/%s", childConfig.Name) + file := ctrlcommon.NewIgnFile(filename, childConfig.Name) + files = append(files, file) + + out = append(out, testhelpers.NewMachineConfig( + childConfig.Name, + map[string]string{ + "machineconfiguration.openshift.io/role": name, + }, + "", + []ign3types.File{file})) + } + + // Create a rendered MachineConfig to accompany our MachineConfigPool. + out = append(out, testhelpers.NewMachineConfig( + mcp.Spec.Configuration.Name, + map[string]string{ + ctrlcommon.GeneratedByControllerVersionAnnotationKey: "version-number", + "machineconfiguration.openshift.io/role": name, + }, + "", + files)) + + return out +} + +// Creates a simple MachineConfigPool object for testing. Requires a name for +// the MachineConfigPool, optionally accepts a name for the rendered config. +func newMachineConfigPool(name string, params ...string) *mcfgv1.MachineConfigPool { + renderedConfigName := "" + if len(params) >= 1 { + renderedConfigName = params[0] + } else { + renderedConfigName = fmt.Sprintf("rendered-%s-1", name) + } + + childConfigs := []corev1.ObjectReference{} + for i := 1; i <= 5; i++ { + childConfigs = append(childConfigs, corev1.ObjectReference{ + Name: fmt.Sprintf("%s-config-%d", name, i), + Kind: "MachineConfig", + }) + } + + nodeRoleLabel := fmt.Sprintf("node-role.kubernetes.io/%s", name) + nodeSelector := metav1.AddLabelToSelector(&metav1.LabelSelector{}, nodeRoleLabel, "") + + poolSelector := metav1.AddLabelToSelector(&metav1.LabelSelector{}, mcfgv1.MachineConfigRoleLabelKey, name) + + mcp := testhelpers.NewMachineConfigPool(name, poolSelector, nodeSelector, renderedConfigName) + mcp.Spec.Configuration.Source = append(mcp.Spec.Configuration.Source, childConfigs...) 
+ mcp.Status.Configuration.Source = append(mcp.Status.Configuration.Source, childConfigs...) + + return mcp +} + +// Opts a MachineConfigPool into layering. +func optInMCP(ctx context.Context, t *testing.T, cs *Clients, poolName string) *mcfgv1.MachineConfigPool { + t.Helper() + + mcp, err := cs.mcfgclient.MachineconfigurationV1().MachineConfigPools().Get(ctx, poolName, metav1.GetOptions{}) + require.NoError(t, err) + + mcp.Labels[ctrlcommon.LayeringEnabledPoolLabel] = "" + + mcp, err = cs.mcfgclient.MachineconfigurationV1().MachineConfigPools().Update(ctx, mcp, metav1.UpdateOptions{}) + require.NoError(t, err) + + return mcp +} + +// Opts a MachineConfigPool out of layering. +func optOutMCP(ctx context.Context, t *testing.T, cs *Clients, poolName string) { + t.Helper() + + mcp, err := cs.mcfgclient.MachineconfigurationV1().MachineConfigPools().Get(ctx, poolName, metav1.GetOptions{}) + require.NoError(t, err) + + delete(mcp.Labels, ctrlcommon.LayeringEnabledPoolLabel) + + _, err = cs.mcfgclient.MachineconfigurationV1().MachineConfigPools().Update(ctx, mcp, metav1.UpdateOptions{}) + require.NoError(t, err) +} + +// Polls until a MachineConfigPool reaches a desired state. +func assertMachineConfigPoolReachesState(ctx context.Context, t *testing.T, cs *Clients, poolName string, checkFunc func(*mcfgv1.MachineConfigPool) bool) bool { + t.Helper() + + pollCtx, cancel := context.WithTimeout(ctx, time.Second*10) + t.Cleanup(cancel) + + err := wait.PollImmediateUntilWithContext(pollCtx, time.Millisecond, func(c context.Context) (bool, error) { + mcp, err := cs.mcfgclient.MachineconfigurationV1().MachineConfigPools().Get(c, poolName, metav1.GetOptions{}) + if err != nil { + return false, err + } + + return checkFunc(mcp), nil + }) + + return assert.NoError(t, err, "MachineConfigPool %s never reached desired state", poolName) +} + +// Asserts that there are no build pods. +func assertNoBuildPods(ctx context.Context, t *testing.T, cs *Clients) bool { + t.Helper() + + foundBuildPods := false + + buildPodNames := []string{} + + podList, err := cs.kubeclient.CoreV1().Pods(ctrlcommon.MCONamespace).List(ctx, metav1.ListOptions{}) + require.NoError(t, err) + + for _, pod := range podList.Items { + pod := pod + if hasAllRequiredOSBuildLabels(pod.Labels) { + foundBuildPods = true + buildPodNames = append(buildPodNames, pod.Name) + } + } + + return assert.False(t, foundBuildPods, "expected not to find build pods, found: %v", buildPodNames) +} + +// Asserts that there are no builds. +func assertNoBuilds(ctx context.Context, t *testing.T, cs *Clients) bool { + t.Helper() + + foundBuilds := false + + buildNames := []string{} + + buildList, err := cs.buildclient.BuildV1().Builds(ctrlcommon.MCONamespace).List(ctx, metav1.ListOptions{}) + require.NoError(t, err) + + for _, build := range buildList.Items { + build := build + if hasAllRequiredOSBuildLabels(build.Labels) { + foundBuilds = true + buildNames = append(buildNames, build.Name) + } + } + + return assert.False(t, foundBuilds, "expected not to find builds, found: %v", buildNames) +} + +// Asserts that ConfigMaps were created. 
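+// Specifically, the Dockerfile and MachineConfig ConfigMaps that are generated
+// for the given ImageBuildRequest.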
+func assertConfigMapsCreated(ctx context.Context, t *testing.T, cs *Clients, ibr ImageBuildRequest) bool { + t.Helper() + + isFound := func(name string, configmapList *corev1.ConfigMapList) bool { + for _, item := range configmapList.Items { + if item.Name == name && hasAllRequiredOSBuildLabels(item.Labels) { + return true + } + } + + return false + } + + expectedConfigmaps := map[string]bool{ + ibr.getDockerfileConfigMapName(): false, + ibr.getMCConfigMapName(): false, + } + + err := wait.PollImmediateInfiniteWithContext(ctx, time.Millisecond, func(ctx context.Context) (bool, error) { + configmapList, err := cs.kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).List(ctx, metav1.ListOptions{}) + if err != nil { + return false, err + } + + for expected := range expectedConfigmaps { + if isFound(expected, configmapList) { + expectedConfigmaps[expected] = true + } else { + return false, nil + } + } + + return true, nil + }) + + return assert.NoError(t, err, "configmap(s) was not created %v", expectedConfigmaps) +} + +// Polls until a build is created. +func assertBuildIsCreated(ctx context.Context, t *testing.T, cs *Clients, ibr ImageBuildRequest) bool { + t.Helper() + + buildName := ibr.getBuildName() + + err := wait.PollImmediateInfiniteWithContext(ctx, time.Millisecond, func(ctx context.Context) (bool, error) { + buildList, err := cs.buildclient.BuildV1().Builds(ctrlcommon.MCONamespace).List(ctx, metav1.ListOptions{}) + if err != nil { + return false, err + } + + for _, build := range buildList.Items { + if build.Name == buildName { + return true, nil + } + } + + return false, nil + }) + + return assert.NoError(t, err, "build %s was not created", buildName) +} + +// Polls until a build pod is created. +func assertBuildPodIsCreated(ctx context.Context, t *testing.T, cs *Clients, ibr ImageBuildRequest) bool { + t.Helper() + + buildPodName := ibr.getBuildName() + + err := wait.PollImmediateInfiniteWithContext(ctx, time.Millisecond, func(ctx context.Context) (bool, error) { + podList, err := cs.kubeclient.CoreV1().Pods(ctrlcommon.MCONamespace).List(ctx, metav1.ListOptions{}) + if err != nil { + return false, err + } + + for _, pod := range podList.Items { + if pod.Name == buildPodName { + return true, nil + } + } + + return false, nil + }) + + return assert.NoError(t, err, "build pod %s was not created", buildPodName) +} + +// Simulates a pod being scheduled and reaching various states. Verifies that +// the target MachineConfigPool reaches the expected states as it goes. +func assertMCPFollowsImageBuildStatus(ctx context.Context, t *testing.T, cs *Clients, mcp *mcfgv1.MachineConfigPool, endingPhase buildv1.BuildPhase) bool { //nolint:unparam // This param is actually used. + t.Helper() + + var outcome bool + + defer func() { + assert.True(t, outcome) + }() + + // Each of the various pod phases we're interested in. + buildPhases := []buildv1.BuildPhase{ + buildv1.BuildPhaseNew, + buildv1.BuildPhasePending, + buildv1.BuildPhaseRunning, + endingPhase, + } + + // Each pod phase is correllated to a MachineConfigPoolConditionType. 
+ buildPhaseToMCPCondition := map[buildv1.BuildPhase]mcfgv1.MachineConfigPoolConditionType{ + buildv1.BuildPhaseNew: mcfgv1.MachineConfigPoolBuildPending, + buildv1.BuildPhasePending: mcfgv1.MachineConfigPoolBuildPending, + buildv1.BuildPhaseRunning: mcfgv1.MachineConfigPoolBuilding, + buildv1.BuildPhaseComplete: mcfgv1.MachineConfigPoolBuildSuccess, + buildv1.BuildPhaseError: mcfgv1.MachineConfigPoolBuildFailed, + buildv1.BuildPhaseFailed: mcfgv1.MachineConfigPoolBuildFailed, + buildv1.BuildPhaseCancelled: mcfgv1.MachineConfigPoolBuildFailed, + } + + // Determine if the MachineConfigPool should have a reference to the build pod. + shouldHaveBuildRef := map[buildv1.BuildPhase]bool{ + buildv1.BuildPhaseNew: true, + buildv1.BuildPhasePending: true, + buildv1.BuildPhaseRunning: true, + buildv1.BuildPhaseComplete: false, + buildv1.BuildPhaseError: true, + buildv1.BuildPhaseFailed: true, + buildv1.BuildPhaseCancelled: true, + } + + ibr := newImageBuildRequest(mcp) + + buildName := ibr.getBuildName() + + // Wait for the build pod to be created. + outcome = assertBuildIsCreated(ctx, t, cs, ibr) + if !outcome { + return false + } + + outcome = assertConfigMapsCreated(ctx, t, cs, ibr) + if !outcome { + return false + } + + // Cycle through each of the build pod phases. + for _, phase := range buildPhases { + // Get the build pod by name. + build, err := cs.buildclient.BuildV1().Builds(ctrlcommon.MCONamespace).Get(ctx, buildName, metav1.GetOptions{}) + require.NoError(t, err) + + // Set the pod phase and update it. + build.Status.Phase = phase + + // If we're successful, the build object should have an image pullspec attached to it. + // TODO: Need to figure out how / where to set this on the custom pod builder. + if phase == buildv1.BuildPhaseComplete { + build.Status.OutputDockerImageReference = expectedImagePullspecWithTag + build.Status.Output.To = &buildv1.BuildStatusOutputTo{ + ImageDigest: expectedImageSHA, + } + } + + _, err = cs.buildclient.BuildV1().Builds(ctrlcommon.MCONamespace).Update(ctx, build, metav1.UpdateOptions{}) + require.NoError(t, err) + + // Look up the expected MCP condition for our current pod phase. + expectedMCPCondition := buildPhaseToMCPCondition[phase] + + // Look up the expected build pod condition for our current pod phase. + expectedBuildRefPresence := shouldHaveBuildRef[phase] + + var targetPool *mcfgv1.MachineConfigPool + + // Wait for the MCP condition to reach the expected state. + outcome = assertMachineConfigPoolReachesState(ctx, t, cs, mcp.Name, func(mcp *mcfgv1.MachineConfigPool) bool { + targetPool = mcp + return mcfgv1.IsMachineConfigPoolConditionTrue(mcp.Status.Conditions, expectedMCPCondition) && + expectedBuildRefPresence == machineConfigPoolHasBuildRef(mcp) && + machineConfigPoolHasMachineConfigRefs(mcp) + }) + + if !outcome { + spew.Dump(targetPool) + t.Logf("Has expected condition (%s) for phase (%s)? %v", expectedMCPCondition, phase, mcfgv1.IsMachineConfigPoolConditionTrue(targetPool.Status.Conditions, expectedMCPCondition)) + t.Logf("Has ref? %v. Expected: %v. Actual: %v.", expectedBuildRefPresence == machineConfigPoolHasBuildRef(targetPool), expectedBuildRefPresence, machineConfigPoolHasBuildRef(targetPool)) + t.Logf("Has MachineConfig refs? %v", machineConfigPoolHasMachineConfigRefs(targetPool)) + return false + } + } + + // Find out what happened to the build and its objects. 
+ _, err := cs.buildclient.BuildV1().Builds(ctrlcommon.MCONamespace).Get(ctx, buildName, metav1.GetOptions{}) + switch endingPhase { + case buildv1.BuildPhaseComplete: + // If the build pod was successful, looking it up should fail because it should have been deleted. + outcome = assert.Error(t, err) + default: + // If the build pod failed, looking it up should succeed since we leave it around for debugging. + outcome = assert.NoError(t, err) + } + + return outcome +} + +// Simulates a pod being scheduled and reaching various states. Verifies that +// the target MachineConfigPool reaches the expected states as it goes. +func assertMCPFollowsBuildPodStatus(ctx context.Context, t *testing.T, cs *Clients, mcp *mcfgv1.MachineConfigPool, endingPhase corev1.PodPhase) bool { //nolint:unparam // This param is actually used. + t.Helper() + + var outcome bool + + defer func() { + assert.True(t, outcome) + }() + + // Each of the various pod phases we're interested in. + podPhases := []corev1.PodPhase{ + corev1.PodPending, + corev1.PodRunning, + endingPhase, + } + + // Each pod phase is correllated to a MachineConfigPoolConditionType. + podPhaseToMCPCondition := map[corev1.PodPhase]mcfgv1.MachineConfigPoolConditionType{ + corev1.PodPending: mcfgv1.MachineConfigPoolBuildPending, + corev1.PodRunning: mcfgv1.MachineConfigPoolBuilding, + corev1.PodFailed: mcfgv1.MachineConfigPoolBuildFailed, + corev1.PodSucceeded: mcfgv1.MachineConfigPoolBuildSuccess, + } + + // Determine if the MachineConfigPool should have a reference to the build pod. + shouldHaveBuildPodRef := map[corev1.PodPhase]bool{ + corev1.PodPending: true, + corev1.PodRunning: true, + corev1.PodFailed: true, + corev1.PodSucceeded: false, + } + + ibr := newImageBuildRequest(mcp) + buildPodName := ibr.getBuildName() + + // Wait for the build pod to be created. + outcome = assertBuildPodIsCreated(ctx, t, cs, ibr) + if !outcome { + return outcome + } + + outcome = assertConfigMapsCreated(ctx, t, cs, ibr) + if !outcome { + return false + } + + // Cycle through each of the build pod phases. + for _, phase := range podPhases { + // Get the build pod by name. + buildPod, err := cs.kubeclient.CoreV1().Pods(ctrlcommon.MCONamespace).Get(ctx, buildPodName, metav1.GetOptions{}) + require.NoError(t, err) + + // Set the pod phase and update it. + buildPod.Status.Phase = phase + + // If we've reached the successful pod phase, create the ConfigMap that the + // build pod does which has the resulting image digest. + if phase == corev1.PodSucceeded { + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: ibr.getDigestConfigMapName(), + Namespace: ctrlcommon.MCONamespace, + }, + Data: map[string]string{ + "digest": expectedImageSHA, + }, + } + _, cmErr := cs.kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).Create(ctx, cm, metav1.CreateOptions{}) + require.NoError(t, cmErr) + } + + _, err = cs.kubeclient.CoreV1().Pods(ctrlcommon.MCONamespace).Update(ctx, buildPod, metav1.UpdateOptions{}) + require.NoError(t, err) + + // Look up the expected MCP condition for our current pod phase. + expectedMCPCondition := podPhaseToMCPCondition[phase] + + // Look up the expected build pod condition for our current pod phase. + expectedBuildPodRefPresence := shouldHaveBuildPodRef[phase] + + var targetPool *mcfgv1.MachineConfigPool + + // Wait for the MCP condition to reach the expected state. 
+ outcome = assertMachineConfigPoolReachesState(ctx, t, cs, mcp.Name, func(mcp *mcfgv1.MachineConfigPool) bool { + targetPool = mcp + return mcfgv1.IsMachineConfigPoolConditionTrue(mcp.Status.Conditions, expectedMCPCondition) && + expectedBuildPodRefPresence == machineConfigPoolHasBuildRef(mcp) && + machineConfigPoolHasMachineConfigRefs(mcp) + }) + + if !outcome { + spew.Dump(targetPool) + t.Logf("Has expected condition (%s) for phase (%s)? %v", expectedMCPCondition, phase, mcfgv1.IsMachineConfigPoolConditionTrue(targetPool.Status.Conditions, expectedMCPCondition)) + t.Logf("Has ref? %v. Expected: %v. Actual: %v.", expectedBuildPodRefPresence == machineConfigPoolHasBuildRef(targetPool), expectedBuildPodRefPresence, machineConfigPoolHasBuildRef(targetPool)) + t.Logf("Has MachineConfig refs? %v", machineConfigPoolHasMachineConfigRefs(targetPool)) + return false + } + } + + // Find out what happened to the build pod. + _, err := cs.kubeclient.CoreV1().Pods(ctrlcommon.MCONamespace).Get(ctx, buildPodName, metav1.GetOptions{}) + switch endingPhase { + case corev1.PodSucceeded: + + // If the build pod was successful, looking it up should fail because it should have been deleted. + outcome = assert.Error(t, err) + case corev1.PodFailed: + // If the build pod failed, looking it up should succeed since we leave it around for debugging. + outcome = assert.NoError(t, err) + } + + return outcome +} + +// Dumps all the objects within each of the fake clients to a YAML file for easy debugging. +func dumpObjects(ctx context.Context, t *testing.T, cs *Clients, filenamePrefix string) { + if cs.mcfgclient != nil { + mcp, err := cs.mcfgclient.MachineconfigurationV1().MachineConfigPools().List(ctx, metav1.ListOptions{}) + require.NoError(t, err) + + dumpToYAMLFile(t, mcp, filenamePrefix+"-machineconfigpools.yaml") + + machineconfigs, err := cs.mcfgclient.MachineconfigurationV1().MachineConfigs().List(ctx, metav1.ListOptions{}) + require.NoError(t, err) + + dumpToYAMLFile(t, machineconfigs, filenamePrefix+"-machineconfigs.yaml") + } + + if cs.kubeclient != nil { + pods, err := cs.kubeclient.CoreV1().Pods(ctrlcommon.MCONamespace).List(ctx, metav1.ListOptions{}) + require.NoError(t, err) + + dumpToYAMLFile(t, pods, filenamePrefix+"-pods.yaml") + + configmaps, err := cs.kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).List(ctx, metav1.ListOptions{}) + require.NoError(t, err) + dumpToYAMLFile(t, configmaps, filenamePrefix+"-configmaps.yaml") + } + + if cs.buildclient != nil { + buildconfigs, err := cs.buildclient.BuildV1().BuildConfigs(ctrlcommon.MCONamespace).List(ctx, metav1.ListOptions{}) + require.NoError(t, err) + + dumpToYAMLFile(t, buildconfigs, filenamePrefix+"-buildconfigs.yaml") + + builds, err := cs.buildclient.BuildV1().Builds(ctrlcommon.MCONamespace).List(ctx, metav1.ListOptions{}) + require.NoError(t, err) + dumpToYAMLFile(t, builds, filenamePrefix+"-builds.yaml") + } +} + +// Dumps the provided object to the given filename. 
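+// Slashes (e.g., from nested subtest names) are replaced with underscores so
+// the result is a valid filename.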
+func dumpToYAMLFile(t *testing.T, obj interface{}, filename string) { + out, err := yaml.Marshal(obj) + require.NoError(t, err) + + filename = strings.ReplaceAll(filename, "/", "_") + + require.NoError(t, ioutil.WriteFile(filename, out, 0755)) +} diff --git a/pkg/controller/build/helpers.go b/pkg/controller/build/helpers.go new file mode 100644 index 0000000000..9e12ccb764 --- /dev/null +++ b/pkg/controller/build/helpers.go @@ -0,0 +1,221 @@ +package build + +import ( + "bytes" + "compress/gzip" + "encoding/base64" + "encoding/json" + "fmt" + "io" + + "github.com/containers/image/v5/docker/reference" + "github.com/opencontainers/go-digest" + corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + k8stypes "k8s.io/apimachinery/pkg/types" +) + +const ( + canonicalSecretSuffix string = "-canonical" +) + +// Compresses and base-64 encodes a given byte array. Ideal for loading an +// arbitrary byte array into a ConfigMap or Secret. +func compressAndEncode(payload []byte) (*bytes.Buffer, error) { + out := bytes.NewBuffer(nil) + + if len(payload) == 0 { + return out, nil + } + + // We need to base64-encode our gzipped data so we can marshal it in and out + // of a string since ConfigMaps and Secrets expect a textual representation. + base64Enc := base64.NewEncoder(base64.StdEncoding, out) + defer base64Enc.Close() + + err := compress(bytes.NewBuffer(payload), base64Enc) + if err != nil { + return nil, fmt.Errorf("could not compress and encode payload: %w", err) + } + + err = base64Enc.Close() + if err != nil { + return nil, fmt.Errorf("could not close base64 encoder: %w", err) + } + + return out, err +} + +// Compresses a given io.Reader to a given io.Writer +func compress(r io.Reader, w io.Writer) error { + gz, err := gzip.NewWriterLevel(w, gzip.BestCompression) + if err != nil { + return fmt.Errorf("could not initialize gzip writer: %w", err) + } + + defer gz.Close() + + if _, err := io.Copy(gz, r); err != nil { + return fmt.Errorf("could not compress payload: %w", err) + } + + if err := gz.Close(); err != nil { + return fmt.Errorf("could not close gzipwriter: %w", err) + } + + return nil +} + +// Replaces any tags on the image pullspec with the provided image digest. +func parseImagePullspecWithDigest(pullspec string, imageDigest digest.Digest) (string, error) { + named, err := reference.ParseNamed(pullspec) + if err != nil { + return "", err + } + + canonical, err := reference.WithDigest(reference.TrimNamed(named), imageDigest) + if err != nil { + return "", err + } + + return canonical.String(), nil +} + +// Parses an image pullspec from a string and an image SHA and replaces any +// tags on the pullspec with the provided image SHA. +func parseImagePullspec(pullspec, imageSHA string) (string, error) { + imageDigest, err := digest.Parse(imageSHA) + if err != nil { + return "", err + } + + return parseImagePullspecWithDigest(pullspec, imageDigest) +} + +// Converts a legacy Docker pull secret into a more modern representation. +// Essentially, it converts {"registry.hostname.com": {"username": "user"...}} +// into {"auths": {"registry.hostname.com": {"username": "user"...}}}. If it +// encounters a pull secret already in this configuration, it will return the +// input secret as-is. Returns either the supplied data or the newly-configured +// representation of said data, a boolean to indicate whether it was converted, +// and any errors resulting from the conversion process. 
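+// Note that the new-style form is tried first; only when no "auths" entries
+// are found is the payload treated as a legacy-style secret and wrapped.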
+func canonicalizePullSecretBytes(secretBytes []byte) ([]byte, bool, error) { + type newStyleAuth struct { + Auths map[string]interface{} `json:"auths,omitempty"` + } + + // Try marshaling the new-style secret first: + newStyleDecoded := &newStyleAuth{} + if err := json.Unmarshal(secretBytes, newStyleDecoded); err != nil { + return nil, false, fmt.Errorf("could not decode new-style pull secret: %w", err) + } + + // We have an new-style secret, so we can just return here. + if len(newStyleDecoded.Auths) != 0 { + return secretBytes, false, nil + } + + // We need to convert the legacy-style secret to the new-style. + oldStyleDecoded := map[string]interface{}{} + if err := json.Unmarshal(secretBytes, &oldStyleDecoded); err != nil { + return nil, false, fmt.Errorf("could not decode legacy-style pull secret: %w", err) + } + + out, err := json.Marshal(&newStyleAuth{ + Auths: oldStyleDecoded, + }) + + return out, err == nil, err +} + +// Performs the above operation upon a given secret, potentially creating a new +// secret for insertion with the suffix '-canonical' on its name. +func canonicalizePullSecret(secret *corev1.Secret) (*corev1.Secret, error) { + secret = secret.DeepCopy() + + key, err := getPullSecretKey(secret) + if err != nil { + return nil, err + } + + secretBytes, ok := secret.Data[key] + if !ok { + return nil, fmt.Errorf("could not locate key %q in %s", key, secret.Name) + } + + canonicalizedSecretBytes, canonicalized, err := canonicalizePullSecretBytes(secretBytes) + if err != nil { + return nil, err + } + + if !canonicalized { + return secret, nil + } + + out := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s%s", secret.Name, canonicalSecretSuffix), + Namespace: secret.Namespace, + }, + Data: map[string][]byte{ + corev1.DockerConfigJsonKey: canonicalizedSecretBytes, + }, + Type: corev1.SecretTypeDockerConfigJson, + } + + return out, nil +} + +// Looks up a given secret key for a given secret type and validates that the +// key is present and the secret is a non-zero length. Returns an error if it +// is the incorrect secret type, missing the appropriate key, or the secret is +// a zero-length. +func getPullSecretKey(secret *corev1.Secret) (string, error) { + if secret.Type != corev1.SecretTypeDockerConfigJson && secret.Type != corev1.SecretTypeDockercfg { + return "", fmt.Errorf("unknown secret type %s", secret.Type) + } + + secretTypes := map[corev1.SecretType]string{ + corev1.SecretTypeDockercfg: corev1.DockerConfigKey, + corev1.SecretTypeDockerConfigJson: corev1.DockerConfigJsonKey, + } + + key := secretTypes[secret.Type] + + val, ok := secret.Data[key] + if !ok { + return "", fmt.Errorf("missing %q in %s", key, secret.Name) + } + + if len(val) == 0 { + return "", fmt.Errorf("empty value %q in %s", key, secret.Name) + } + + return key, nil +} + +// Converts a given Kube object into an object reference. +func toObjectRef(obj interface { + GetName() string + GetNamespace() string + GetUID() k8stypes.UID + GetObjectKind() schema.ObjectKind +}) *corev1.ObjectReference { + return &corev1.ObjectReference{ + Kind: obj.GetObjectKind().GroupVersionKind().Kind, + Name: obj.GetName(), + Namespace: obj.GetNamespace(), + UID: obj.GetUID(), + } +} + +// Returns any supplied error except ones that match k8serrors.IsNotFound(). 
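+// Useful when deleting objects that may already be gone, e.g. (hypothetical
+// caller):
+//
+//	return ignoreIsNotFoundErr(kubeclient.CoreV1().ConfigMaps(namespace).Delete(ctx, name, metav1.DeleteOptions{}))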
+func ignoreIsNotFoundErr(err error) error { + if err != nil && !k8serrors.IsNotFound(err) { + return err + } + + return nil +} diff --git a/pkg/controller/build/helpers_test.go b/pkg/controller/build/helpers_test.go new file mode 100644 index 0000000000..0f06a0a41b --- /dev/null +++ b/pkg/controller/build/helpers_test.go @@ -0,0 +1,149 @@ +package build + +import ( + "testing" + + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Tests that a given image pullspec with a tag and SHA is correctly substituted. +func TestParseImagePullspec(t *testing.T) { + t.Parallel() + + out, err := parseImagePullspec(expectedImagePullspecWithTag, expectedImageSHA) + assert.NoError(t, err) + assert.Equal(t, expectedImagePullspecWithSHA, out) +} + +// Tests that pull secrets are canonicalized. In other words, converted from +// the legacy-style pull secret to the new-style secret. +func TestCanonicalizePullSecret(t *testing.T) { + t.Parallel() + + legacySecret := `{"registry.hostname.com": {"username": "user", "password": "s3kr1t", "auth": "s00pers3kr1t", "email": "user@hostname.com"}}` + + newSecret := `{"auths":` + legacySecret + `}` + + testCases := []struct { + name string + inputSecret *corev1.Secret + expectCanonical bool + expectError bool + }{ + { + name: "new-style secret dockerconfigjson", + inputSecret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pull-secret", + }, + Data: map[string][]byte{ + corev1.DockerConfigJsonKey: []byte(newSecret), + }, + Type: corev1.SecretTypeDockerConfigJson, + }, + expectCanonical: false, + }, + { + name: "new-style secret dockercfg", + inputSecret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pull-secret", + }, + Data: map[string][]byte{ + corev1.DockerConfigKey: []byte(newSecret), + }, + Type: corev1.SecretTypeDockercfg, + }, + expectCanonical: false, + }, + { + name: "legacy secret dockercfg", + inputSecret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pull-secret", + }, + Data: map[string][]byte{ + corev1.DockerConfigKey: []byte(legacySecret), + }, + Type: corev1.SecretTypeDockercfg, + }, + expectCanonical: true, + }, + { + name: "legacy secret dockerconfigjson", + inputSecret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pull-secret", + }, + Data: map[string][]byte{ + corev1.DockerConfigJsonKey: []byte(legacySecret), + }, + Type: corev1.SecretTypeDockerConfigJson, + }, + expectCanonical: true, + }, + { + name: "empty secret", + inputSecret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pull-secret", + }, + Data: map[string][]byte{ + corev1.DockerConfigKey: {}, + }, + Type: corev1.SecretTypeDockercfg, + }, + expectError: true, + }, + { + name: "unknown key secret", + inputSecret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pull-secret", + }, + Data: map[string][]byte{ + "unknown-key": []byte(newSecret), + }, + }, + expectError: true, + }, + { + name: "unknown secret type", + inputSecret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pull-secret", + }, + Data: map[string][]byte{ + corev1.DockerConfigKey: []byte(newSecret), + }, + Type: corev1.SecretTypeOpaque, + }, + expectError: true, + }, + } + + for _, testCase := range testCases { + testCase := testCase + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + + out, err := canonicalizePullSecret(testCase.inputSecret) + if testCase.expectError { + assert.Error(t, err) + return + } else { + assert.NoError(t, err) + } + + if 
testCase.expectCanonical { + assert.Contains(t, out.Name, "canonical") + } + + for _, val := range out.Data { + assert.JSONEq(t, newSecret, string(val)) + } + }) + } +} diff --git a/pkg/controller/build/image_build_controller.go b/pkg/controller/build/image_build_controller.go new file mode 100644 index 0000000000..ad272c06b4 --- /dev/null +++ b/pkg/controller/build/image_build_controller.go @@ -0,0 +1,335 @@ +package build + +import ( + "context" + "fmt" + "strings" + "time" + + buildv1 "github.com/openshift/api/build/v1" + buildlistersv1 "github.com/openshift/client-go/build/listers/build/v1" + mcfgv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" + ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" + "github.com/openshift/machine-config-operator/pkg/generated/clientset/versioned/scheme" + corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + coreclientsetv1 "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog/v2" +) + +// Controller defines the build controller. +type ImageBuildController struct { + *Clients + *informers + + eventRecorder record.EventRecorder + + // The function to call whenever we've encountered a Build. This function is + // responsible for examining the Build to determine what state its in and map + // that state to the appropriate MachineConfigPool object. + buildHandler func(*buildv1.Build) error + + syncHandler func(pod string) error + enqueueBuild func(*buildv1.Build) + + buildLister buildlistersv1.BuildLister + + buildListerSynced cache.InformerSynced + + queue workqueue.RateLimitingInterface + + config BuildControllerConfig +} + +var _ ImageBuilder = (*ImageBuildController)(nil) + +// Returns a new image build controller. +func newImageBuildController( + ctrlConfig BuildControllerConfig, + clients *Clients, + buildHandler func(*buildv1.Build) error, +) *ImageBuildController { + eventBroadcaster := record.NewBroadcaster() + eventBroadcaster.StartLogging(klog.Infof) + eventBroadcaster.StartRecordingToSink(&coreclientsetv1.EventSinkImpl{Interface: clients.kubeclient.CoreV1().Events("")}) + + ctrl := &ImageBuildController{ + Clients: clients, + informers: newInformers(clients), + eventRecorder: eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: "machineosbuilder-imagebuildcontroller"}), + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "machineosbuilder-imagebuildcontroller"), + config: ctrlConfig, + buildHandler: buildHandler, + } + + // As an aside, why doesn't the constructor here set up all the informers? 
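+	// Watch all Build objects; builds that are not OS image builds are filtered
+	// out by label in the event handlers and in syncBuild.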
+ ctrl.buildInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: ctrl.addBuild, + UpdateFunc: ctrl.updateBuild, + DeleteFunc: ctrl.deleteBuild, + }) + + ctrl.buildLister = ctrl.buildInformer.Lister() + ctrl.buildListerSynced = ctrl.buildInformer.Informer().HasSynced + + ctrl.syncHandler = ctrl.syncBuild + ctrl.enqueueBuild = ctrl.enqueueDefault + + return ctrl +} + +func (ctrl *ImageBuildController) enqueue(build *buildv1.Build) { + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(build) + if err != nil { + utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", build, err)) + return + } + + ctrl.queue.Add(key) +} + +func (ctrl *ImageBuildController) enqueueRateLimited(build *buildv1.Build) { + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(build) + if err != nil { + utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", build, err)) + return + } + + ctrl.queue.AddRateLimited(key) +} + +// enqueueAfter will enqueue a build after the provided amount of time. +func (ctrl *ImageBuildController) enqueueAfter(build *buildv1.Build, after time.Duration) { + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(build) + if err != nil { + utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", build, err)) + return + } + + ctrl.queue.AddAfter(key, after) +} + +// enqueueDefault calls a default enqueue function +func (ctrl *ImageBuildController) enqueueDefault(build *buildv1.Build) { + ctrl.enqueueAfter(build, ctrl.config.UpdateDelay) +} + +// Syncs Builds. +func (ctrl *ImageBuildController) syncBuild(key string) error { //nolint:dupl // This does have commonality with the PodBuildController. + start := time.Now() + defer func() { + klog.Infof("Finished syncing pod %s: %s", key, time.Since(start)) + }() + klog.Infof("Started syncing pod %s", key) + + _, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + return err + } + // TODO: Why do I need to set the namespace here? + build, err := ctrl.buildLister.Builds(ctrlcommon.MCONamespace).Get(name) + if k8serrors.IsNotFound(err) { + klog.V(2).Infof("Build %v has been deleted", key) + return nil + } + if err != nil { + return err + } + + build, err = ctrl.buildclient.BuildV1().Builds(ctrlcommon.MCONamespace).Get(context.TODO(), build.Name, metav1.GetOptions{}) + if err != nil { + return err + } + + if !hasAllRequiredOSBuildLabels(build.Labels) { + klog.Infof("Ignoring non-OS image build %s", build.Name) + return nil + } + + if err := ctrl.buildHandler(build); err != nil { + return fmt.Errorf("unable to update with build status: %w", err) + } + + klog.Infof("Updated MachineConfigPool with build status. Build %s in %s", build.Name, build.Status.Phase) + + return nil +} + +// Starts the Image Build Controller. +func (ctrl *ImageBuildController) Run(ctx context.Context, workers int) { + defer utilruntime.HandleCrash() + defer ctrl.queue.ShutDown() + + ctrl.informers.start(ctx) + + if !cache.WaitForCacheSync(ctx.Done(), ctrl.buildListerSynced) { + return + } + + klog.Info("Starting MachineOSBuilder-ImageBuildController") + defer klog.Info("Shutting down MachineOSBuilder-ImageBuildController") + + for i := 0; i < workers; i++ { + go wait.Until(ctrl.worker, time.Second, ctx.Done()) + } + + <-ctx.Done() +} + +// Gets the final image pullspec. In this case, we can interrogate the Build +// object for this information. 
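+// The tag on the build's output reference is replaced with the reported image
+// digest, e.g. "registry.hostname.com/org/repo:latest" becomes
+// "registry.hostname.com/org/repo@sha256:<digest>".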
+func (ctrl *ImageBuildController) FinalPullspec(pool *mcfgv1.MachineConfigPool) (string, error) {
+	buildName := newImageBuildRequest(pool).getBuildName()
+
+	build, err := ctrl.buildclient.BuildV1().Builds(ctrlcommon.MCONamespace).Get(context.TODO(), buildName, metav1.GetOptions{})
+	if err != nil {
+		return "", fmt.Errorf("could not get build %s for pool %s: %w", buildName, pool.Name, err)
+	}
+
+	// Get the image digest from the completed build and replace the tag with
+	// the digest.
+	if build.Status.OutputDockerImageReference == "" {
+		return "", fmt.Errorf("no output image reference found")
+	}
+
+	if build.Status.Output.To.ImageDigest == "" {
+		return "", fmt.Errorf("no image digest found")
+	}
+
+	return parseImagePullspec(build.Status.OutputDockerImageReference, build.Status.Output.To.ImageDigest)
+}
+
+// Deletes the underlying Build object.
+func (ctrl *ImageBuildController) DeleteBuildObject(pool *mcfgv1.MachineConfigPool) error {
+	buildName := newImageBuildRequest(pool).getBuildName()
+	return ctrl.buildclient.BuildV1().Builds(ctrlcommon.MCONamespace).Delete(context.TODO(), buildName, metav1.DeleteOptions{})
+}
+
+// Determines if a build is currently running by looking for a corresponding Build.
+func (ctrl *ImageBuildController) IsBuildRunning(pool *mcfgv1.MachineConfigPool) (bool, error) {
+	buildName := newImageBuildRequest(pool).getBuildName()
+
+	// First check if we have a build in progress for this MachineConfigPool and rendered config.
+	_, err := ctrl.buildclient.BuildV1().Builds(ctrlcommon.MCONamespace).Get(context.TODO(), buildName, metav1.GetOptions{})
+	if err != nil && !k8serrors.IsNotFound(err) {
+		return false, err
+	}
+
+	return err == nil, nil
+}
+
+// Starts a new build unless a matching one already exists, in which case it
+// returns an object reference to the preexisting Build object.
+func (ctrl *ImageBuildController) StartBuild(ibr ImageBuildRequest) (*corev1.ObjectReference, error) {
+	targetMC := ibr.Pool.Spec.Configuration.Name
+
+	buildName := ibr.getBuildName()
+
+	// TODO: Find a constant for this:
+	if !strings.HasPrefix(targetMC, "rendered-") {
+		return nil, fmt.Errorf("%s is not a rendered MachineConfig", targetMC)
+	}
+
+	// First check if we have a build in progress for this MachineConfigPool and rendered config.
+	build, err := ctrl.buildclient.BuildV1().Builds(ctrlcommon.MCONamespace).Get(context.TODO(), buildName, metav1.GetOptions{})
+	if err != nil && !k8serrors.IsNotFound(err) {
+		return nil, err
+	}
+
+	// This means we found a preexisting build.
+	if build != nil && err == nil && hasAllRequiredOSBuildLabels(build.Labels) {
+		klog.Infof("Found preexisting OS image build (%s) for pool %s", build.Name, ibr.Pool.Name)
+		return toObjectRef(build), nil
+	}
+
+	klog.Infof("Starting build for pool %s", ibr.Pool.Name)
+	klog.Infof("Build name: %s", buildName)
+	klog.Infof("Final image will be pushed to %q, using secret %q", ibr.FinalImage.Pullspec, ibr.FinalImage.PullSecret.Name)
+
+	build, err = ctrl.buildclient.BuildV1().Builds(ctrlcommon.MCONamespace).Create(context.TODO(), ibr.toBuild(), metav1.CreateOptions{})
+	if err != nil {
+		return nil, fmt.Errorf("could not create OS image build: %w", err)
+	}
+
+	klog.Infof("Build started for pool %s in %s!", ibr.Pool.Name, build.Name)
+
+	return toObjectRef(build), nil
+}
+
+// Fires whenever a Build is added.
+func (ctrl *ImageBuildController) addBuild(obj interface{}) {
+	build := obj.(*buildv1.Build).DeepCopy()
+	klog.V(4).Infof("Adding Build %s. Is OS image build? 
%v", build.Name, hasAllRequiredOSBuildLabels(build.Labels)) + if hasAllRequiredOSBuildLabels(build.Labels) { + ctrl.enqueueBuild(build) + } +} + +// Fires whenever a Build is updated. +func (ctrl *ImageBuildController) updateBuild(_, curObj interface{}) { + curBuild := curObj.(*buildv1.Build).DeepCopy() + + isOSImageBuild := hasAllRequiredOSBuildLabels(curBuild.Labels) + + klog.Infof("Updating build %s. Is OS image build? %v", curBuild.Name, isOSImageBuild) + + // Ignore non-OS image builds. + // TODO: Figure out if we can add the filter criteria onto the lister. + if !isOSImageBuild { + return + } + + klog.Infof("Build %s updated", curBuild.Name) + + ctrl.enqueueBuild(curBuild) +} + +func (ctrl *ImageBuildController) handleErr(err error, key interface{}) { + if err == nil { + ctrl.queue.Forget(key) + return + } + + if ctrl.queue.NumRequeues(key) < ctrl.config.MaxRetries { + klog.V(2).Infof("Error syncing build %v: %v", key, err) + ctrl.queue.AddRateLimited(key) + return + } + + utilruntime.HandleError(err) + klog.V(2).Infof("Dropping build %q out of the queue: %v", key, err) + ctrl.queue.Forget(key) + ctrl.queue.AddAfter(key, 1*time.Minute) +} + +func (ctrl *ImageBuildController) deleteBuild(obj interface{}) { + build := obj.(*buildv1.Build).DeepCopy() + klog.V(4).Infof("Deleting Build %s. Is OS image build? %v", build.Name, hasAllRequiredOSBuildLabels(build.Labels)) + ctrl.enqueueBuild(build) +} + +// worker runs a worker thread that just dequeues items, processes them, and marks them done. +// It enforces that the syncHandler is never invoked concurrently with the same key. +func (ctrl *ImageBuildController) worker() { + for ctrl.processNextWorkItem() { + } +} + +func (ctrl *ImageBuildController) processNextWorkItem() bool { + key, quit := ctrl.queue.Get() + if quit { + return false + } + defer ctrl.queue.Done(key) + + err := ctrl.syncHandler(key.(string)) + ctrl.handleErr(err, key) + + return true +} diff --git a/pkg/controller/build/image_build_request.go b/pkg/controller/build/image_build_request.go new file mode 100644 index 0000000000..801d8d15a7 --- /dev/null +++ b/pkg/controller/build/image_build_request.go @@ -0,0 +1,619 @@ +package build + +import ( + _ "embed" + "encoding/json" + "fmt" + "strings" + "text/template" + + buildv1 "github.com/openshift/api/build/v1" + mcfgv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" + ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" + "github.com/openshift/machine-config-operator/test/helpers" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + mcPoolAnnotation string = "machineconfiguration.openshift.io/pool" + machineConfigJSONFilename string = "machineconfig.json.gz" + buildahImagePullspec string = "quay.io/buildah/stable:latest" +) + +//go:embed assets/Dockerfile.on-cluster-build-template +var dockerfileTemplate string + +//go:embed assets/wait.sh +var waitScript string + +//go:embed assets/buildah-build.sh +var buildahBuildScript string + +//go:embed assets/podman-build.sh +var podmanBuildScript string + +// Represents a given image pullspec and the location of the pull secret. +type ImageInfo struct { + // The pullspec for a given image (e.g., registry.hostname.com/orp/repo:tag) + Pullspec string + // The name of the K8s secret required for pulling the aforementioned image. + PullSecret corev1.LocalObjectReference +} + +// Represents the request to build a layered OS image. 
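+//
+// A request is typically constructed from the target pool plus the two
+// ConfigMaps that carry the image and secret information, e.g.:
+//
+//	ibr := newImageBuildRequestWithConfigMap(pool, osImageURLConfigMap, onClusterBuildConfigMap)
+//	build := ibr.toBuild()    // or ibr.toBuildPod() for the custom build pod flow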
+type ImageBuildRequest struct { + // The target MachineConfigPool + Pool *mcfgv1.MachineConfigPool + // The base OS image (derived from the machine-config-osimageurl ConfigMap) + BaseImage ImageInfo + // The extensions image (derived from the machine-config-osimageurl ConfigMap) + ExtensionsImage ImageInfo + // The final OS image (desired from the on-cluster-build-config ConfigMap) + FinalImage ImageInfo + // The OpenShift release version (derived from the machine-config-osimageurl ConfigMap) + ReleaseVersion string +} + +// Constructs a simple ImageBuildRequest. +func newImageBuildRequest(pool *mcfgv1.MachineConfigPool) ImageBuildRequest { + return ImageBuildRequest{ + Pool: pool.DeepCopy(), + } +} + +// Populates the final image info from the on-cluster-build-config ConfigMap. +func newFinalImageInfo(onClusterBuildConfigMap *corev1.ConfigMap) ImageInfo { + return ImageInfo{ + Pullspec: onClusterBuildConfigMap.Data[finalImagePullspecConfigKey], + PullSecret: corev1.LocalObjectReference{ + Name: onClusterBuildConfigMap.Data[finalImagePushSecretNameConfigKey], + }, + } +} + +// Populates the base image info from both the on-cluster-build-config and +// machine-config-osimageurl ConfigMaps. +func newBaseImageInfo(osImageURLConfigMap, onClusterBuildConfigMap *corev1.ConfigMap) ImageInfo { + return ImageInfo{ + Pullspec: osImageURLConfigMap.Data[baseOSContainerImageConfigKey], + PullSecret: corev1.LocalObjectReference{ + Name: onClusterBuildConfigMap.Data[baseImagePullSecretNameConfigKey], + }, + } +} + +// Populates the extensions image info from both the on-cluster-build-config +// and machine-config-osimageurl ConfigMaps. +func newExtensionsImageInfo(osImageURLConfigMap, onClusterBuildConfigMap *corev1.ConfigMap) ImageInfo { + return ImageInfo{ + Pullspec: osImageURLConfigMap.Data[baseOSExtensionsContainerImageConfigKey], + PullSecret: corev1.LocalObjectReference{ + Name: onClusterBuildConfigMap.Data[baseImagePullSecretNameConfigKey], + }, + } +} + +// Constructs an ImageBuildRequest with all of the images populated from ConfigMaps +func newImageBuildRequestWithConfigMap(pool *mcfgv1.MachineConfigPool, osImageURLConfigMap, onClusterBuildConfigMap *corev1.ConfigMap) ImageBuildRequest { + return ImageBuildRequest{ + Pool: pool.DeepCopy(), + BaseImage: newBaseImageInfo(osImageURLConfigMap, onClusterBuildConfigMap), + FinalImage: newFinalImageInfo(onClusterBuildConfigMap), + ExtensionsImage: newExtensionsImageInfo(osImageURLConfigMap, onClusterBuildConfigMap), + ReleaseVersion: osImageURLConfigMap.Data[releaseVersionConfigKey], + } +} + +// Renders our Dockerfile and injects it into a ConfigMap for consumption by the image builder. +func (i ImageBuildRequest) dockerfileToConfigMap() (*corev1.ConfigMap, error) { + dockerfile, err := i.renderDockerfile() + if err != nil { + return nil, err + } + + configmap := &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: i.getObjectMeta(i.getDockerfileConfigMapName()), + Data: map[string]string{ + "Dockerfile": dockerfile, + }, + } + + return configmap, nil +} + +// Stuffs a given MachineConfig into a ConfigMap, gzipping and base64-encoding it. +func (i ImageBuildRequest) toConfigMap(mc *mcfgv1.MachineConfig) (*corev1.ConfigMap, error) { + out, err := json.Marshal(mc) + if err != nil { + return nil, fmt.Errorf("could not encode MachineConfig %s: %w", mc.Name, err) + } + + // TODO: Check for size here and determine if its too big. ConfigMaps and + // Secrets have a size limit of 1 MB. 
Compressing and encoding the + // MachineConfig provides us with additional headroom. However, if the + // MachineConfig grows large enough, we may need to do something more + // involved. + compressed, err := compressAndEncode(out) + if err != nil { + return nil, fmt.Errorf("could not compress or encode MachineConfig %s: %w", mc.Name, err) + } + + configmap := &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: i.getObjectMeta(i.getMCConfigMapName()), + Data: map[string]string{ + machineConfigJSONFilename: compressed.String(), + }, + } + + return configmap, nil +} + +// Renders our Dockerfile template. +// +// TODO: Figure out how to parse the Dockerfile using +// https://github.com/openshift/imagebuilder/tree/master/dockerfile/parser to +// ensure that we've generated a valid Dockerfile. +// +// TODO: Figure out how to programatically generate the Dockerfile using a +// higher-level abstraction than just naïvely rendering a text template and +// hoping for the best. +func (i ImageBuildRequest) renderDockerfile() (string, error) { + tmpl, err := template.New("dockerfile").Parse(dockerfileTemplate) + if err != nil { + return "", err + } + + out := &strings.Builder{} + + if err := tmpl.Execute(out, i); err != nil { + return "", err + } + + return out.String(), nil +} + +// Creates an OpenShift Image Builder build object prewired with all ConfigMaps +// / Secrets / etc. +func (i ImageBuildRequest) toBuild() *buildv1.Build { + skipLayers := buildv1.ImageOptimizationSkipLayers + + // The Build API requires the Dockerfile field to be set, even if you + // override it via a ConfigMap. + dockerfile := "FROM scratch" + + return &buildv1.Build{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: i.getObjectMeta(i.getBuildName()), + Spec: buildv1.BuildSpec{ + CommonSpec: buildv1.CommonSpec{ + // TODO: We may need to configure a Build Input here so we can wire up + // the pull secrets for the base OS image and the extensions image. + Source: buildv1.BuildSource{ + Type: buildv1.BuildSourceDockerfile, + Dockerfile: &dockerfile, + ConfigMaps: []buildv1.ConfigMapBuildSource{ + { + // Provides the rendered MachineConfig in a gzipped / + // base64-encoded format. + ConfigMap: corev1.LocalObjectReference{ + Name: i.getMCConfigMapName(), + }, + DestinationDir: "machineconfig", + }, + { + // Provides the rendered Dockerfile. + ConfigMap: corev1.LocalObjectReference{ + Name: i.getDockerfileConfigMapName(), + }, + }, + }, + }, + Strategy: buildv1.BuildStrategy{ + DockerStrategy: &buildv1.DockerBuildStrategy{ + // Squashing layers is good as long as it doesn't cause problems with what + // the users want to do. It says "some syntax is not supported" + ImageOptimizationPolicy: &skipLayers, + }, + Type: buildv1.DockerBuildStrategyType, + }, + Output: buildv1.BuildOutput{ + To: &corev1.ObjectReference{ + Name: i.FinalImage.Pullspec, + Kind: "DockerImage", + }, + PushSecret: &i.FinalImage.PullSecret, + ImageLabels: []buildv1.ImageLabel{ + {Name: "io.openshift.machineconfig.pool", Value: i.Pool.Name}, + }, + }, + }, + }, + } +} + +// Creates a custom image build pod to build the final OS image with all +// ConfigMaps / Secrets / etc. wired into it. +func (i ImageBuildRequest) toBuildPod() *corev1.Pod { + return i.toBuildahPod() +} + +// This reflects an attempt to use Podman to perform the OS build. +// Unfortunately, it was difficult to get this to run unprivileged and I was +// not able to figure out a solution. Nevertheless, I will leave it here for +// posterity. 
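+//
+// Note that toBuildPod currently returns toBuildahPod; this Podman variant is
+// kept only as a reference and is not wired into the build flow.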
+func (i ImageBuildRequest) toPodmanPod() *corev1.Pod { + env := []corev1.EnvVar{ + { + Name: "DIGEST_CONFIGMAP_NAME", + Value: i.getDigestConfigMapName(), + }, + { + Name: "HOME", + Value: "/tmp", + }, + { + Name: "TAG", + Value: i.FinalImage.Pullspec, + }, + { + Name: "BASE_IMAGE_PULL_CREDS", + Value: "/tmp/base-image-pull-creds/config.json", + }, + { + Name: "FINAL_IMAGE_PUSH_CREDS", + Value: "/tmp/final-image-push-creds/config.json", + }, + } + + command := []string{"/bin/bash", "-c"} + + volumeMounts := []corev1.VolumeMount{ + { + Name: "machineconfig", + MountPath: "/tmp/machineconfig", + }, + { + Name: "dockerfile", + MountPath: "/tmp/dockerfile", + }, + { + Name: "base-image-pull-creds", + MountPath: "/tmp/base-image-pull-creds", + }, + { + Name: "final-image-push-creds", + MountPath: "/tmp/final-image-push-creds", + }, + { + Name: "done", + MountPath: "/tmp/done", + }, + } + + // See: https://access.redhat.com/solutions/6964609 + // TL;DR: Trying to get podman / buildah to run in an unprivileged container + // is quite involved. However, OpenShift Builder runs in privileged + // containers, which sets a precedent. + // This requires that one run: $ oc adm policy add-scc-to-user -z machine-os-builder privileged + securityContext := &corev1.SecurityContext{ + Privileged: helpers.BoolToPtr(true), + SeccompProfile: &corev1.SeccompProfile{ + Type: "Localhost", + LocalhostProfile: helpers.StrToPtr("profiles/unshare.json"), + }, + } + + // TODO: We need pull creds with permissions to pull the base image. By + // default, none of the MCO pull secrets can directly pull it. We can use the + // pull-secret creds from openshift-config to do that, though we'll need to + // mirror those creds into the MCO namespace. The operator portion of the MCO + // has some logic to detect whenever that secret changes. + return &corev1.Pod{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Pod", + }, + ObjectMeta: i.getObjectMeta(i.getBuildName()), + Spec: corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyNever, + Containers: []corev1.Container{ + { + // This container performs the image build / push process. + // Additionally, it takes the digestfile which podman creates, which + // contains the SHA256 from the container registry, and stores this + // in a ConfigMap which is consumed after the pod stops. + Name: "image-build", + Image: i.BaseImage.Pullspec, + Env: env, + Command: append(command, podmanBuildScript), + ImagePullPolicy: corev1.PullIfNotPresent, + SecurityContext: securityContext, + VolumeMounts: volumeMounts, + }, + }, + // We probably cannot count on the 'builder' service account being + // present in the future. If we cannot use the builder service account + // means that we'll need to: + // 1. Create a SecurityContextConstraint. + // 2. Additional RBAC / ClusterRole / etc. work to suss this out. + ServiceAccountName: "machine-os-builder", + Volumes: []corev1.Volume{ // nolint:dupl // I don't want to deduplicate this yet since there are still some unknowns. + { + // Provides the rendered Dockerfile. + Name: "dockerfile", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: i.getDockerfileConfigMapName(), + }, + }, + }, + }, + { + // Provides the rendered MachineConfig in a gzipped / base64-encoded + // format. 
+ Name: "machineconfig", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: i.getMCConfigMapName(), + }, + }, + }, + }, + { + // Provides the credentials needed to pull the base OS image. + Name: "base-image-pull-creds", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: i.BaseImage.PullSecret.Name, + Items: []corev1.KeyToPath{ + { + Key: corev1.DockerConfigJsonKey, + Path: "config.json", + }, + }, + }, + }, + }, + { + // Provides the credentials needed to push the final OS image. + Name: "final-image-push-creds", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: i.FinalImage.PullSecret.Name, + Items: []corev1.KeyToPath{ + { + Key: corev1.DockerConfigJsonKey, + Path: "config.json", + }, + }, + }, + }, + }, + { + // Provides a way for the "image-build" container to signal that it + // finished so that the "wait-for-done" container can retrieve the + // iamge SHA. + Name: "done", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{ + Medium: corev1.StorageMediumMemory, + }, + }, + }, + }, + }, + } +} + +// We're able to run the Buildah image in an unprivileged pod provided that the +// machine-os-builder service account has the anyuid security constraint +// context enabled to allow us to use UID 1000, which maps to the UID within +// the official Buildah image. +// nolint:dupl // I don't want to deduplicate this yet since there are still some unknowns. +func (i ImageBuildRequest) toBuildahPod() *corev1.Pod { + env := []corev1.EnvVar{ + { + Name: "DIGEST_CONFIGMAP_NAME", + Value: i.getDigestConfigMapName(), + }, + { + Name: "HOME", + Value: "/home/build", + }, + { + Name: "TAG", + Value: i.FinalImage.Pullspec, + }, + { + Name: "BASE_IMAGE_PULL_CREDS", + Value: "/tmp/base-image-pull-creds/config.json", + }, + { + Name: "FINAL_IMAGE_PUSH_CREDS", + Value: "/tmp/final-image-push-creds/config.json", + }, + } + + var uid int64 = 1000 + var gid int64 = 1000 + + securityContext := &corev1.SecurityContext{ + RunAsUser: &uid, + RunAsGroup: &gid, + } + + command := []string{"/bin/bash", "-c"} + + volumeMounts := []corev1.VolumeMount{ + { + Name: "machineconfig", + MountPath: "/tmp/machineconfig", + }, + { + Name: "dockerfile", + MountPath: "/tmp/dockerfile", + }, + { + Name: "base-image-pull-creds", + MountPath: "/tmp/base-image-pull-creds", + }, + { + Name: "final-image-push-creds", + MountPath: "/tmp/final-image-push-creds", + }, + { + Name: "done", + MountPath: "/tmp/done", + }, + } + + // TODO: We need pull creds with permissions to pull the base image. By + // default, none of the MCO pull secrets can directly pull it. We can use the + // pull-secret creds from openshift-config to do that, though we'll need to + // mirror those creds into the MCO namespace. The operator portion of the MCO + // has some logic to detect whenever that secret changes. + return &corev1.Pod{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Pod", + }, + ObjectMeta: i.getObjectMeta(i.getBuildName()), + Spec: corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyNever, + Containers: []corev1.Container{ + { + // This container performs the image build / push process. + Name: "image-build", + // TODO: Figure out how to not hard-code this here. 
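+					// buildahImagePullspec is currently hard-coded to
+					// quay.io/buildah/stable:latest (see the constants at the
+					// top of image_build_request.go).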
+ Image: buildahImagePullspec, + Env: env, + Command: append(command, buildahBuildScript), + ImagePullPolicy: corev1.PullAlways, + SecurityContext: securityContext, + VolumeMounts: volumeMounts, + }, + { + // This container waits for the aforementioned container to finish + // building so we can get the final image SHA. We do this by using + // the base OS image (which contains the "oc" binary) to create a + // ConfigMap from the digestfile that Buildah creates, which allows + // us to avoid parsing log files. + Name: "wait-for-done", + Env: env, + Command: append(command, waitScript), + Image: i.BaseImage.Pullspec, + ImagePullPolicy: corev1.PullAlways, + SecurityContext: securityContext, + VolumeMounts: volumeMounts, + }, + }, + ServiceAccountName: "machine-os-builder", + Volumes: []corev1.Volume{ + { + // Provides the rendered Dockerfile. + Name: "dockerfile", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: i.getDockerfileConfigMapName(), + }, + }, + }, + }, + { + // Provides the rendered MachineConfig in a gzipped / base64-encoded + // format. + Name: "machineconfig", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: i.getMCConfigMapName(), + }, + }, + }, + }, + { + // Provides the credentials needed to pull the base OS image. + Name: "base-image-pull-creds", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: i.BaseImage.PullSecret.Name, + Items: []corev1.KeyToPath{ + { + Key: corev1.DockerConfigJsonKey, + Path: "config.json", + }, + }, + }, + }, + }, + { + // Provides the credentials needed to push the final OS image. + Name: "final-image-push-creds", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: i.FinalImage.PullSecret.Name, + Items: []corev1.KeyToPath{ + { + Key: corev1.DockerConfigJsonKey, + Path: "config.json", + }, + }, + }, + }, + }, + { + // Provides a way for the "image-build" container to signal that it + // finished so that the "wait-for-done" container can retrieve the + // iamge SHA. + Name: "done", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{ + Medium: corev1.StorageMediumMemory, + }, + }, + }, + }, + }, + } +} + +// Constructs a common metav1.ObjectMeta object with the namespace, labels, and annotations set. +func (i ImageBuildRequest) getObjectMeta(name string) metav1.ObjectMeta { + return metav1.ObjectMeta{ + Name: name, + Namespace: ctrlcommon.MCONamespace, + Labels: map[string]string{ + ctrlcommon.OSImageBuildPodLabel: "", + targetMachineConfigPoolLabel: i.Pool.Name, + desiredConfigLabel: i.Pool.Spec.Configuration.Name, + }, + Annotations: map[string]string{ + mcPoolAnnotation: "", + }, + } +} + +// Computes the Dockerfile ConfigMap name based upon the MachineConfigPool name. +func (i ImageBuildRequest) getDockerfileConfigMapName() string { + return fmt.Sprintf("dockerfile-%s", i.Pool.Spec.Configuration.Name) +} + +// Computes the MachineConfig ConfigMap name based upon the MachineConfigPool name. +func (i ImageBuildRequest) getMCConfigMapName() string { + return fmt.Sprintf("mc-%s", i.Pool.Spec.Configuration.Name) +} + +// Computes the build name based upon the MachineConfigPool name. 
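+// Strictly speaking, the name is derived from the pool's rendered config: a
+// pool at "rendered-worker-1" yields the build name "build-rendered-worker-1".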
+func (i ImageBuildRequest) getBuildName() string { + return fmt.Sprintf("build-%s", i.Pool.Spec.Configuration.Name) +} + +func (i ImageBuildRequest) getDigestConfigMapName() string { + return fmt.Sprintf("digest-%s", i.Pool.Spec.Configuration.Name) +} diff --git a/pkg/controller/build/image_build_request_test.go b/pkg/controller/build/image_build_request_test.go new file mode 100644 index 0000000000..fa8b4bcf85 --- /dev/null +++ b/pkg/controller/build/image_build_request_test.go @@ -0,0 +1,80 @@ +package build + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +// Tests that Image Build Requests is constructed as expected and does a +// (mostly) smoke test of its methods. +func TestImageBuildRequest(t *testing.T) { + t.Parallel() + + mcp := newMachineConfigPool("worker", "rendered-worker-1") + + osImageURLConfigMap := getOSImageURLConfigMap() + onClusterBuildConfigMap := getOnClusterBuildConfigMap() + + ibr := newImageBuildRequestWithConfigMap(mcp, osImageURLConfigMap, onClusterBuildConfigMap) + + dockerfile, err := ibr.renderDockerfile() + assert.NoError(t, err) + + expectedDockerfileContents := []string{ + osImageURLConfigMap.Data[releaseVersionConfigKey], + osImageURLConfigMap.Data[baseOSContainerImageConfigKey], + osImageURLConfigMap.Data[baseOSExtensionsContainerImageConfigKey], + mcp.Name, + mcp.Spec.Configuration.Name, + machineConfigJSONFilename, + } + + for _, content := range expectedDockerfileContents { + assert.Contains(t, dockerfile, content) + } + + assert.NotNil(t, ibr.toBuild()) + assert.NotNil(t, ibr.toBuildPod()) + + dockerfileConfigmap, err := ibr.dockerfileToConfigMap() + assert.NoError(t, err) + assert.NotNil(t, dockerfileConfigmap) + assert.Equal(t, dockerfileConfigmap.Data["Dockerfile"], dockerfile) + + assert.Equal(t, osImageURLConfigMap.Data[baseOSContainerImageConfigKey], ibr.BaseImage.Pullspec) + assert.Equal(t, osImageURLConfigMap.Data[baseOSExtensionsContainerImageConfigKey], ibr.ExtensionsImage.Pullspec) + + assert.Equal(t, onClusterBuildConfigMap.Data[baseImagePullSecretNameConfigKey], ibr.BaseImage.PullSecret.Name) + assert.Equal(t, onClusterBuildConfigMap.Data[baseImagePullSecretNameConfigKey], ibr.ExtensionsImage.PullSecret.Name) + + assert.Equal(t, onClusterBuildConfigMap.Data[finalImagePullspecConfigKey], ibr.FinalImage.Pullspec) + + assert.Equal(t, onClusterBuildConfigMap.Data[finalImagePushSecretNameConfigKey], ibr.FinalImage.PullSecret.Name) + + assert.Equal(t, "dockerfile-rendered-worker-1", ibr.getDockerfileConfigMapName()) + assert.Equal(t, "build-rendered-worker-1", ibr.getBuildName()) + assert.Equal(t, "mc-rendered-worker-1", ibr.getMCConfigMapName()) +} + +// Tests that the Dockerfile is correctly rendered in the absence of the +// extensions image. For now, we just check whether the extensions image is +// imported. Once we wire up the extensions container, we'll need to modify +// this to ensure that the remainder of the Dockerfile gets rendered correctly. 
+func TestImageBuildRequestMissingExtensionsImage(t *testing.T) { + t.Parallel() + + mcp := newMachineConfigPool("worker", "rendered-worker-1") + + osImageURLConfigMap := getOSImageURLConfigMap() + onClusterBuildConfigMap := getOnClusterBuildConfigMap() + + delete(osImageURLConfigMap.Data, baseOSExtensionsContainerImageConfigKey) + + ibr := newImageBuildRequestWithConfigMap(mcp, osImageURLConfigMap, onClusterBuildConfigMap) + + dockerfile, err := ibr.renderDockerfile() + assert.NoError(t, err) + + assert.NotContains(t, dockerfile, "AS extensions") +} diff --git a/pkg/controller/build/pod_build_controller.go b/pkg/controller/build/pod_build_controller.go new file mode 100644 index 0000000000..adb7111d7b --- /dev/null +++ b/pkg/controller/build/pod_build_controller.go @@ -0,0 +1,355 @@ +package build + +import ( + "context" + "fmt" + "strings" + "time" + + mcfgv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" + ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" + "github.com/openshift/machine-config-operator/pkg/generated/clientset/versioned/scheme" + corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + aggerrors "k8s.io/apimachinery/pkg/util/errors" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + coreclientsetv1 "k8s.io/client-go/kubernetes/typed/core/v1" + corelistersv1 "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog/v2" +) + +// Controller defines the build controller. +type PodBuildController struct { + *Clients + *informers + + eventRecorder record.EventRecorder + + // The function to call whenever we've encountered a build pod. This function is + // responsible for examining the build pod to determine what state its in and map + // that state to the appropriate MachineConfigPool object. + podHandler func(*corev1.Pod) error + + syncHandler func(pod string) error + enqueuePod func(*corev1.Pod) + + podLister corelistersv1.PodLister + + podListerSynced cache.InformerSynced + + queue workqueue.RateLimitingInterface + + config BuildControllerConfig +} + +var _ ImageBuilder = (*PodBuildController)(nil) + +// Returns a new pod build controller. +func newPodBuildController( + ctrlConfig BuildControllerConfig, + clients *Clients, + podHandler func(*corev1.Pod) error, +) *PodBuildController { + eventBroadcaster := record.NewBroadcaster() + eventBroadcaster.StartLogging(klog.Infof) + eventBroadcaster.StartRecordingToSink(&coreclientsetv1.EventSinkImpl{Interface: clients.kubeclient.CoreV1().Events("")}) + + ctrl := &PodBuildController{ + Clients: clients, + informers: newInformers(clients), + eventRecorder: eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: "machineosbuilder-podbuildcontroller"}), + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "machineosbuilder-podbuildcontroller"), + config: ctrlConfig, + podHandler: podHandler, + } + + // As an aside, why doesn't the constructor here set up all the informers? 
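+	// The handlers below enqueue build pods onto the work queue, where syncPod
+	// later examines them and reports their status back through podHandler.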
+ ctrl.podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: ctrl.addPod, + UpdateFunc: ctrl.updatePod, + DeleteFunc: ctrl.deletePod, + }) + + ctrl.podLister = ctrl.podInformer.Lister() + + ctrl.podListerSynced = ctrl.podInformer.Informer().HasSynced + + ctrl.syncHandler = ctrl.syncPod + ctrl.enqueuePod = ctrl.enqueueDefault + + return ctrl +} + +func (ctrl *PodBuildController) enqueue(pod *corev1.Pod) { + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(pod) + if err != nil { + utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", pod, err)) + return + } + + ctrl.queue.Add(key) +} + +func (ctrl *PodBuildController) enqueueRateLimited(pod *corev1.Pod) { + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(pod) + if err != nil { + utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", pod, err)) + return + } + + ctrl.queue.AddRateLimited(key) +} + +// enqueueAfter will enqueue a pod after the provided amount of time. +func (ctrl *PodBuildController) enqueueAfter(pod *corev1.Pod, after time.Duration) { + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(pod) + if err != nil { + utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", pod, err)) + return + } + + ctrl.queue.AddAfter(key, after) +} + +// enqueueDefault calls a default enqueue function +func (ctrl *PodBuildController) enqueueDefault(pod *corev1.Pod) { + ctrl.enqueueAfter(pod, ctrl.config.UpdateDelay) +} + +// Syncs pods. +func (ctrl *PodBuildController) syncPod(key string) error { //nolint:dupl // This does have commonality with the ImageBuildController. + start := time.Now() + defer func() { + klog.Infof("Finished syncing pod %s: %s", key, time.Since(start)) + }() + klog.Infof("Started syncing pod %s", key) + + _, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + return err + } + // TODO: Why do I need to set the namespace here? + pod, err := ctrl.podLister.Pods(ctrlcommon.MCONamespace).Get(name) + if k8serrors.IsNotFound(err) { + klog.V(2).Infof("Pod %v has been deleted", key) + return nil + } + if err != nil { + return err + } + + pod, err = ctrl.kubeclient.CoreV1().Pods(ctrlcommon.MCONamespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}) + if err != nil { + return err + } + + // If we don't have all of the OS build labels attached to this pod, we + // ignore it. There is probably something we can do along the lines looking + // at ownership though. + if !hasAllRequiredOSBuildLabels(pod.Labels) { + klog.Infof("Ignoring non-build pod %s", pod.Name) + return nil + } + + if err := ctrl.podHandler(pod); err != nil { + return fmt.Errorf("unable to update with build pod status: %w", err) + } + + klog.Infof("Updated MachineConfigPool with build pod status. Build pod %s in %s", pod.Name, pod.Status.Phase) + + return nil +} + +// Starts the Pod Build Controller. +func (ctrl *PodBuildController) Run(ctx context.Context, workers int) { + defer utilruntime.HandleCrash() + defer ctrl.queue.ShutDown() + + ctrl.informers.start(ctx) + + if !cache.WaitForCacheSync(ctx.Done(), ctrl.podListerSynced) { + return + } + + klog.Info("Starting MachineOSBuilder-PodBuildController") + defer klog.Info("Shutting down MachineOSBuilder-PodBuildController") + + for i := 0; i < workers; i++ { + go wait.Until(ctrl.worker, time.Second, ctx.Done()) + } + + <-ctx.Done() +} + +// Gets the final image pullspec by retrieving the ConfigMap that the build pod +// creates from the Buildah digestfile. 
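+//
+// For example, for a pool at rendered config "rendered-worker-1" this reads
+// the "digest" key of the "digest-rendered-worker-1" ConfigMap (written by the
+// build pod's wait-for-done container) and combines that digest with the
+// configured final image pullspec via parseImagePullspec.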
+func (ctrl *PodBuildController) FinalPullspec(pool *mcfgv1.MachineConfigPool) (string, error) { + onClusterBuildConfigMap, err := ctrl.kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).Get(context.TODO(), onClusterBuildConfigMapName, metav1.GetOptions{}) + if err != nil { + return "", err + } + + finalImageInfo := newFinalImageInfo(onClusterBuildConfigMap) + ibr := newImageBuildRequest(pool) + + digestConfigMap, err := ctrl.kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).Get(context.TODO(), ibr.getDigestConfigMapName(), metav1.GetOptions{}) + if err != nil { + return "", err + } + + return parseImagePullspec(finalImageInfo.Pullspec, digestConfigMap.Data["digest"]) +} + +// Deletes the underlying build pod. +func (ctrl *PodBuildController) DeleteBuildObject(pool *mcfgv1.MachineConfigPool) error { + // We want to ignore when a pod or ConfigMap is deleted if it is not found. + // This is because when a pool is opted out of layering *after* a successful + // build, no pod nor ConfigMap will remain. So we want to be able to + // idempotently call this function in that case. + return aggerrors.AggregateGoroutines( + func() error { + ibr := newImageBuildRequest(pool) + return ignoreIsNotFoundErr(ctrl.kubeclient.CoreV1().Pods(ctrlcommon.MCONamespace).Delete(context.TODO(), ibr.getBuildName(), metav1.DeleteOptions{})) + }, + func() error { + ibr := newImageBuildRequest(pool) + return ignoreIsNotFoundErr(ctrl.kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).Delete(context.TODO(), ibr.getDigestConfigMapName(), metav1.DeleteOptions{})) + }, + ) +} + +// Determines if a build is currently running by looking for a corresponding pod. +func (ctrl *PodBuildController) IsBuildRunning(pool *mcfgv1.MachineConfigPool) (bool, error) { + ibr := newImageBuildRequest(pool) + + // First check if we have a build in progress for this MachineConfigPool and rendered config. + _, err := ctrl.kubeclient.CoreV1().Pods(ctrlcommon.MCONamespace).Get(context.TODO(), ibr.getBuildName(), metav1.GetOptions{}) + if err != nil && !k8serrors.IsNotFound(err) { + return false, err + } + + return err == nil, nil +} + +// Starts a new build pod, assuming one is not found first. In that case, it returns an object reference to the preexisting build pod. +func (ctrl *PodBuildController) StartBuild(ibr ImageBuildRequest) (*corev1.ObjectReference, error) { + targetMC := ibr.Pool.Spec.Configuration.Name + + // TODO: Find a constant for this: + if !strings.HasPrefix(targetMC, "rendered-") { + return nil, fmt.Errorf("%s is not a rendered MachineConfig", targetMC) + } + + // First check if we have a build in progress for this MachineConfigPool and rendered config. + pod, err := ctrl.kubeclient.CoreV1().Pods(ctrlcommon.MCONamespace).Get(context.TODO(), ibr.getBuildName(), metav1.GetOptions{}) + if err != nil && !k8serrors.IsNotFound(err) { + return nil, err + } + + // This means we found a preexisting build pod. 
+ if pod != nil && err == nil && hasAllRequiredOSBuildLabels(pod.Labels) { + klog.Infof("Found preexisting build pod (%s) for pool %s", pod.Name, ibr.Pool.Name) + return toObjectRef(pod), nil + } + + klog.Infof("Starting build for pool %s", ibr.Pool.Name) + klog.Infof("Build pod name: %s", ibr.getBuildName()) + klog.Infof("Final image will be pushed to %q, using secret %q", ibr.FinalImage.Pullspec, ibr.FinalImage.PullSecret.Name) + + pod, err = ctrl.kubeclient.CoreV1().Pods(ctrlcommon.MCONamespace).Create(context.TODO(), ibr.toBuildPod(), metav1.CreateOptions{}) + if err != nil { + return nil, fmt.Errorf("could not create build pod: %w", err) + } + + klog.Infof("Build started for pool %s in %s!", ibr.Pool.Name, pod.Name) + + return toObjectRef(pod), nil +} + +// Fires whenever a new pod is started. +func (ctrl *PodBuildController) addPod(obj interface{}) { + pod := obj.(*corev1.Pod).DeepCopy() + isBuildPod := hasAllRequiredOSBuildLabels(pod.Labels) + klog.V(4).Infof("Adding Pod %s. Is build pod? %v", pod.Name, isBuildPod) + if isBuildPod { + ctrl.enqueuePod(pod) + } +} + +// Fires whenever a pod is updated. +func (ctrl *PodBuildController) updatePod(oldObj, curObj interface{}) { + oldPod := oldObj.(*corev1.Pod).DeepCopy() + curPod := curObj.(*corev1.Pod).DeepCopy() + + isBuildPod := hasAllRequiredOSBuildLabels(curPod.Labels) + + // Ignore non-build pods. + // TODO: Figure out if we can add the filter criteria onto the lister. + if !isBuildPod { + return + } + + klog.Infof("Updating pod %s. Is build pod? %v", curPod.Name, isBuildPod) + + if oldPod.Status.Phase != curPod.Status.Phase { + klog.Infof("Pod %s changed from %s to %s", oldPod.Name, oldPod.Status.Phase, curPod.Status.Phase) + } + + klog.Infof("Pod %s updated", curPod.Name) + + ctrl.enqueuePod(curPod) +} + +func (ctrl *PodBuildController) handleErr(err error, key interface{}) { + if err == nil { + ctrl.queue.Forget(key) + return + } + + if ctrl.queue.NumRequeues(key) < ctrl.config.MaxRetries { + klog.V(2).Infof("Error syncing pod %v: %v", key, err) + ctrl.queue.AddRateLimited(key) + return + } + + utilruntime.HandleError(err) + klog.V(2).Infof("Dropping pod %q out of the queue: %v", key, err) + ctrl.queue.Forget(key) + ctrl.queue.AddAfter(key, 1*time.Minute) +} + +// Fires whenever a pod is deleted. +func (ctrl *PodBuildController) deletePod(obj interface{}) { + pod, ok := obj.(*corev1.Pod) + if !ok { + return + } + pod = pod.DeepCopy() + klog.V(4).Infof("Deleting Pod %s. Is build pod? %v", pod.Name, hasAllRequiredOSBuildLabels(pod.Labels)) + ctrl.enqueuePod(pod) +} + +// worker runs a worker thread that just dequeues items, processes them, and marks them done. +// It enforces that the syncHandler is never invoked concurrently with the same key. 
+func (ctrl *PodBuildController) worker() { + for ctrl.processNextWorkItem() { + } +} + +func (ctrl *PodBuildController) processNextWorkItem() bool { + key, quit := ctrl.queue.Get() + if quit { + return false + } + defer ctrl.queue.Done(key) + + err := ctrl.syncHandler(key.(string)) + ctrl.handleErr(err, key) + + return true +} diff --git a/pkg/controller/common/constants.go b/pkg/controller/common/constants.go index 12177c3618..571fc825f2 100644 --- a/pkg/controller/common/constants.go +++ b/pkg/controller/common/constants.go @@ -41,4 +41,13 @@ const ( MachineConfigPoolMaster = "master" // MachineConfigPoolWorker is the MachineConfigPool name given to the worker MachineConfigPoolWorker = "worker" + + // LayeringEnabledPoolLabel is the label that enables the "layered" workflow path for a pool. + LayeringEnabledPoolLabel = "machineconfiguration.openshift.io/layering-enabled" + + // ExperimentalNewestLayeredImageEquivalentConfigAnnotationKey is the annotation that signifies which rendered config + // TODO(zzlotnik): Determine if we should use this still. + ExperimentalNewestLayeredImageEquivalentConfigAnnotationKey = "machineconfiguration.openshift.io/newestImageEquivalentConfig" + + OSImageBuildPodLabel = "machineconfiguration.openshift.io/buildPod" ) diff --git a/pkg/controller/common/helpers.go b/pkg/controller/common/helpers.go index ff348f24e7..1ad0e2eb82 100644 --- a/pkg/controller/common/helpers.go +++ b/pkg/controller/common/helpers.go @@ -1170,3 +1170,10 @@ func (n namespacedEventRecorder) Eventf(object runtime.Object, eventtype, reason func (n namespacedEventRecorder) AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) { n.delegate.AnnotatedEventf(ensureEventNamespace(object), annotations, eventtype, reason, messageFmt, args...) } + +func IsLayeredPool(pool *mcfgv1.MachineConfigPool) bool { + if _, ok := pool.Labels[LayeringEnabledPoolLabel]; ok { + return true + } + return false +} diff --git a/pkg/daemon/constants/constants.go b/pkg/daemon/constants/constants.go index aaf272fd71..ce46ad69cf 100644 --- a/pkg/daemon/constants/constants.go +++ b/pkg/daemon/constants/constants.go @@ -8,6 +8,11 @@ const ( // // XXX + // CurrentImageAnnotationKey is used to get the current OS image pullspec for a machine + CurrentImageAnnotationKey = "machineconfiguration.openshift.io/currentImage" + // DesiredImageAnnotationKey is used to specify the desired OS image pullspec for a machine + DesiredImageAnnotationKey = "machineconfiguration.openshift.io/desiredImage" + // CurrentMachineConfigAnnotationKey is used to fetch current MachineConfig for a machine CurrentMachineConfigAnnotationKey = "machineconfiguration.openshift.io/currentConfig" // DesiredMachineConfigAnnotationKey is used to specify the desired MachineConfig for a machine diff --git a/vendor/github.com/openshift/client-go/build/clientset/versioned/clientset.go b/vendor/github.com/openshift/client-go/build/clientset/versioned/clientset.go new file mode 100644 index 0000000000..d7c9c69806 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/clientset/versioned/clientset.go @@ -0,0 +1,105 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package versioned + +import ( + "fmt" + "net/http" + + buildv1 "github.com/openshift/client-go/build/clientset/versioned/typed/build/v1" + discovery "k8s.io/client-go/discovery" + rest "k8s.io/client-go/rest" + flowcontrol "k8s.io/client-go/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + BuildV1() buildv1.BuildV1Interface +} + +// Clientset contains the clients for groups. Each group has exactly one +// version included in a Clientset. +type Clientset struct { + *discovery.DiscoveryClient + buildV1 *buildv1.BuildV1Client +} + +// BuildV1 retrieves the BuildV1Client +func (c *Clientset) BuildV1() buildv1.BuildV1Interface { + return c.buildV1 +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + if c == nil { + return nil + } + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfig will generate a rate-limiter in configShallowCopy. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*Clientset, error) { + configShallowCopy := *c + + if configShallowCopy.UserAgent == "" { + configShallowCopy.UserAgent = rest.DefaultKubernetesUserAgent() + } + + // share the transport between all clients + httpClient, err := rest.HTTPClientFor(&configShallowCopy) + if err != nil { + return nil, err + } + + return NewForConfigAndClient(&configShallowCopy, httpClient) +} + +// NewForConfigAndClient creates a new Clientset for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfigAndClient will generate a rate-limiter in configShallowCopy. +func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + if configShallowCopy.Burst <= 0 { + return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") + } + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + + var cs Clientset + var err error + cs.buildV1, err = buildv1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + return &cs, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *Clientset { + cs, err := NewForConfig(c) + if err != nil { + panic(err) + } + return cs +} + +// New creates a new Clientset for the given RESTClient. 
+func New(c rest.Interface) *Clientset { + var cs Clientset + cs.buildV1 = buildv1.New(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &cs +} diff --git a/vendor/github.com/openshift/client-go/build/clientset/versioned/doc.go b/vendor/github.com/openshift/client-go/build/clientset/versioned/doc.go new file mode 100644 index 0000000000..0e0c2a8900 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/clientset/versioned/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated clientset. +package versioned diff --git a/vendor/github.com/openshift/client-go/build/clientset/versioned/fake/clientset_generated.go b/vendor/github.com/openshift/client-go/build/clientset/versioned/fake/clientset_generated.go new file mode 100644 index 0000000000..7070b0ed07 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/clientset/versioned/fake/clientset_generated.go @@ -0,0 +1,69 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + clientset "github.com/openshift/client-go/build/clientset/versioned" + buildv1 "github.com/openshift/client-go/build/clientset/versioned/typed/build/v1" + fakebuildv1 "github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/fake" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/discovery" + fakediscovery "k8s.io/client-go/discovery/fake" + "k8s.io/client-go/testing" +) + +// NewSimpleClientset returns a clientset that will respond with the provided objects. +// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any validations and/or defaults. It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. +func NewSimpleClientset(objects ...runtime.Object) *Clientset { + o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + cs := &Clientset{tracker: o} + cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} + cs.AddReactor("*", "*", testing.ObjectReaction(o)) + cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) + + return cs +} + +// Clientset implements clientset.Interface. Meant to be embedded into a +// struct to get a default implementation. This makes faking out just the method +// you want to test easier. 
+type Clientset struct { + testing.Fake + discovery *fakediscovery.FakeDiscovery + tracker testing.ObjectTracker +} + +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return c.discovery +} + +func (c *Clientset) Tracker() testing.ObjectTracker { + return c.tracker +} + +var ( + _ clientset.Interface = &Clientset{} + _ testing.FakeClient = &Clientset{} +) + +// BuildV1 retrieves the BuildV1Client +func (c *Clientset) BuildV1() buildv1.BuildV1Interface { + return &fakebuildv1.FakeBuildV1{Fake: &c.Fake} +} diff --git a/vendor/github.com/openshift/client-go/build/clientset/versioned/fake/doc.go b/vendor/github.com/openshift/client-go/build/clientset/versioned/fake/doc.go new file mode 100644 index 0000000000..3630ed1cd1 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/clientset/versioned/fake/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated fake clientset. +package fake diff --git a/vendor/github.com/openshift/client-go/build/clientset/versioned/fake/register.go b/vendor/github.com/openshift/client-go/build/clientset/versioned/fake/register.go new file mode 100644 index 0000000000..c120bd193a --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/clientset/versioned/fake/register.go @@ -0,0 +1,40 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + buildv1 "github.com/openshift/api/build/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var scheme = runtime.NewScheme() +var codecs = serializer.NewCodecFactory(scheme) + +var localSchemeBuilder = runtime.SchemeBuilder{ + buildv1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(scheme)) +} diff --git a/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/fake/doc.go b/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/fake/doc.go new file mode 100644 index 0000000000..2b5ba4c8e4 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/fake/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/fake/fake_build.go b/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/fake/fake_build.go new file mode 100644 index 0000000000..ea364d7eff --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/fake/fake_build.go @@ -0,0 +1,196 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + buildv1 "github.com/openshift/api/build/v1" + applyconfigurationsbuildv1 "github.com/openshift/client-go/build/applyconfigurations/build/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeBuilds implements BuildInterface +type FakeBuilds struct { + Fake *FakeBuildV1 + ns string +} + +var buildsResource = schema.GroupVersionResource{Group: "build.openshift.io", Version: "v1", Resource: "builds"} + +var buildsKind = schema.GroupVersionKind{Group: "build.openshift.io", Version: "v1", Kind: "Build"} + +// Get takes name of the build, and returns the corresponding build object, and an error if there is any. +func (c *FakeBuilds) Get(ctx context.Context, name string, options v1.GetOptions) (result *buildv1.Build, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(buildsResource, c.ns, name), &buildv1.Build{}) + + if obj == nil { + return nil, err + } + return obj.(*buildv1.Build), err +} + +// List takes label and field selectors, and returns the list of Builds that match those selectors. +func (c *FakeBuilds) List(ctx context.Context, opts v1.ListOptions) (result *buildv1.BuildList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(buildsResource, buildsKind, c.ns, opts), &buildv1.BuildList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &buildv1.BuildList{ListMeta: obj.(*buildv1.BuildList).ListMeta} + for _, item := range obj.(*buildv1.BuildList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested builds. +func (c *FakeBuilds) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(buildsResource, c.ns, opts)) + +} + +// Create takes the representation of a build and creates it. Returns the server's representation of the build, and an error, if there is any. +func (c *FakeBuilds) Create(ctx context.Context, build *buildv1.Build, opts v1.CreateOptions) (result *buildv1.Build, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(buildsResource, c.ns, build), &buildv1.Build{}) + + if obj == nil { + return nil, err + } + return obj.(*buildv1.Build), err +} + +// Update takes the representation of a build and updates it. Returns the server's representation of the build, and an error, if there is any. +func (c *FakeBuilds) Update(ctx context.Context, build *buildv1.Build, opts v1.UpdateOptions) (result *buildv1.Build, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(buildsResource, c.ns, build), &buildv1.Build{}) + + if obj == nil { + return nil, err + } + return obj.(*buildv1.Build), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeBuilds) UpdateStatus(ctx context.Context, build *buildv1.Build, opts v1.UpdateOptions) (*buildv1.Build, error) { + obj, err := c.Fake. 
+ Invokes(testing.NewUpdateSubresourceAction(buildsResource, "status", c.ns, build), &buildv1.Build{}) + + if obj == nil { + return nil, err + } + return obj.(*buildv1.Build), err +} + +// Delete takes name of the build and deletes it. Returns an error if one occurs. +func (c *FakeBuilds) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(buildsResource, c.ns, name, opts), &buildv1.Build{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeBuilds) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(buildsResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &buildv1.BuildList{}) + return err +} + +// Patch applies the patch and returns the patched build. +func (c *FakeBuilds) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *buildv1.Build, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(buildsResource, c.ns, name, pt, data, subresources...), &buildv1.Build{}) + + if obj == nil { + return nil, err + } + return obj.(*buildv1.Build), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied build. +func (c *FakeBuilds) Apply(ctx context.Context, build *applyconfigurationsbuildv1.BuildApplyConfiguration, opts v1.ApplyOptions) (result *buildv1.Build, err error) { + if build == nil { + return nil, fmt.Errorf("build provided to Apply must not be nil") + } + data, err := json.Marshal(build) + if err != nil { + return nil, err + } + name := build.Name + if name == nil { + return nil, fmt.Errorf("build.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(buildsResource, c.ns, *name, types.ApplyPatchType, data), &buildv1.Build{}) + + if obj == nil { + return nil, err + } + return obj.(*buildv1.Build), err +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *FakeBuilds) ApplyStatus(ctx context.Context, build *applyconfigurationsbuildv1.BuildApplyConfiguration, opts v1.ApplyOptions) (result *buildv1.Build, err error) { + if build == nil { + return nil, fmt.Errorf("build provided to Apply must not be nil") + } + data, err := json.Marshal(build) + if err != nil { + return nil, err + } + name := build.Name + if name == nil { + return nil, fmt.Errorf("build.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(buildsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &buildv1.Build{}) + + if obj == nil { + return nil, err + } + return obj.(*buildv1.Build), err +} + +// UpdateDetails takes the representation of a build and updates it. Returns the server's representation of the build, and an error, if there is any. +func (c *FakeBuilds) UpdateDetails(ctx context.Context, buildName string, build *buildv1.Build, opts v1.UpdateOptions) (result *buildv1.Build, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(buildsResource, "details", c.ns, build), &buildv1.Build{}) + + if obj == nil { + return nil, err + } + return obj.(*buildv1.Build), err +} + +// Clone takes the representation of a buildRequest and creates it. 
Returns the server's representation of the build, and an error, if there is any. +func (c *FakeBuilds) Clone(ctx context.Context, buildName string, buildRequest *buildv1.BuildRequest, opts v1.CreateOptions) (result *buildv1.Build, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateSubresourceAction(buildsResource, buildName, "clone", c.ns, buildRequest), &buildv1.Build{}) + + if obj == nil { + return nil, err + } + return obj.(*buildv1.Build), err +} diff --git a/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/fake/fake_build_client.go b/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/fake/fake_build_client.go new file mode 100644 index 0000000000..31cdb947c4 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/fake/fake_build_client.go @@ -0,0 +1,28 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1 "github.com/openshift/client-go/build/clientset/versioned/typed/build/v1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeBuildV1 struct { + *testing.Fake +} + +func (c *FakeBuildV1) Builds(namespace string) v1.BuildInterface { + return &FakeBuilds{c, namespace} +} + +func (c *FakeBuildV1) BuildConfigs(namespace string) v1.BuildConfigInterface { + return &FakeBuildConfigs{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeBuildV1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/fake/fake_buildconfig.go b/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/fake/fake_buildconfig.go new file mode 100644 index 0000000000..44e79e3773 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/fake/fake_buildconfig.go @@ -0,0 +1,185 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + buildv1 "github.com/openshift/api/build/v1" + applyconfigurationsbuildv1 "github.com/openshift/client-go/build/applyconfigurations/build/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeBuildConfigs implements BuildConfigInterface +type FakeBuildConfigs struct { + Fake *FakeBuildV1 + ns string +} + +var buildconfigsResource = schema.GroupVersionResource{Group: "build.openshift.io", Version: "v1", Resource: "buildconfigs"} + +var buildconfigsKind = schema.GroupVersionKind{Group: "build.openshift.io", Version: "v1", Kind: "BuildConfig"} + +// Get takes name of the buildConfig, and returns the corresponding buildConfig object, and an error if there is any. +func (c *FakeBuildConfigs) Get(ctx context.Context, name string, options v1.GetOptions) (result *buildv1.BuildConfig, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(buildconfigsResource, c.ns, name), &buildv1.BuildConfig{}) + + if obj == nil { + return nil, err + } + return obj.(*buildv1.BuildConfig), err +} + +// List takes label and field selectors, and returns the list of BuildConfigs that match those selectors. 
+func (c *FakeBuildConfigs) List(ctx context.Context, opts v1.ListOptions) (result *buildv1.BuildConfigList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(buildconfigsResource, buildconfigsKind, c.ns, opts), &buildv1.BuildConfigList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &buildv1.BuildConfigList{ListMeta: obj.(*buildv1.BuildConfigList).ListMeta} + for _, item := range obj.(*buildv1.BuildConfigList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested buildConfigs. +func (c *FakeBuildConfigs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(buildconfigsResource, c.ns, opts)) + +} + +// Create takes the representation of a buildConfig and creates it. Returns the server's representation of the buildConfig, and an error, if there is any. +func (c *FakeBuildConfigs) Create(ctx context.Context, buildConfig *buildv1.BuildConfig, opts v1.CreateOptions) (result *buildv1.BuildConfig, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(buildconfigsResource, c.ns, buildConfig), &buildv1.BuildConfig{}) + + if obj == nil { + return nil, err + } + return obj.(*buildv1.BuildConfig), err +} + +// Update takes the representation of a buildConfig and updates it. Returns the server's representation of the buildConfig, and an error, if there is any. +func (c *FakeBuildConfigs) Update(ctx context.Context, buildConfig *buildv1.BuildConfig, opts v1.UpdateOptions) (result *buildv1.BuildConfig, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(buildconfigsResource, c.ns, buildConfig), &buildv1.BuildConfig{}) + + if obj == nil { + return nil, err + } + return obj.(*buildv1.BuildConfig), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeBuildConfigs) UpdateStatus(ctx context.Context, buildConfig *buildv1.BuildConfig, opts v1.UpdateOptions) (*buildv1.BuildConfig, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(buildconfigsResource, "status", c.ns, buildConfig), &buildv1.BuildConfig{}) + + if obj == nil { + return nil, err + } + return obj.(*buildv1.BuildConfig), err +} + +// Delete takes name of the buildConfig and deletes it. Returns an error if one occurs. +func (c *FakeBuildConfigs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(buildconfigsResource, c.ns, name, opts), &buildv1.BuildConfig{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeBuildConfigs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(buildconfigsResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &buildv1.BuildConfigList{}) + return err +} + +// Patch applies the patch and returns the patched buildConfig. +func (c *FakeBuildConfigs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *buildv1.BuildConfig, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(buildconfigsResource, c.ns, name, pt, data, subresources...), &buildv1.BuildConfig{}) + + if obj == nil { + return nil, err + } + return obj.(*buildv1.BuildConfig), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied buildConfig. +func (c *FakeBuildConfigs) Apply(ctx context.Context, buildConfig *applyconfigurationsbuildv1.BuildConfigApplyConfiguration, opts v1.ApplyOptions) (result *buildv1.BuildConfig, err error) { + if buildConfig == nil { + return nil, fmt.Errorf("buildConfig provided to Apply must not be nil") + } + data, err := json.Marshal(buildConfig) + if err != nil { + return nil, err + } + name := buildConfig.Name + if name == nil { + return nil, fmt.Errorf("buildConfig.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(buildconfigsResource, c.ns, *name, types.ApplyPatchType, data), &buildv1.BuildConfig{}) + + if obj == nil { + return nil, err + } + return obj.(*buildv1.BuildConfig), err +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *FakeBuildConfigs) ApplyStatus(ctx context.Context, buildConfig *applyconfigurationsbuildv1.BuildConfigApplyConfiguration, opts v1.ApplyOptions) (result *buildv1.BuildConfig, err error) { + if buildConfig == nil { + return nil, fmt.Errorf("buildConfig provided to Apply must not be nil") + } + data, err := json.Marshal(buildConfig) + if err != nil { + return nil, err + } + name := buildConfig.Name + if name == nil { + return nil, fmt.Errorf("buildConfig.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(buildconfigsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &buildv1.BuildConfig{}) + + if obj == nil { + return nil, err + } + return obj.(*buildv1.BuildConfig), err +} + +// Instantiate takes the representation of a buildRequest and creates it. Returns the server's representation of the build, and an error, if there is any. +func (c *FakeBuildConfigs) Instantiate(ctx context.Context, buildConfigName string, buildRequest *buildv1.BuildRequest, opts v1.CreateOptions) (result *buildv1.Build, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateSubresourceAction(buildconfigsResource, buildConfigName, "instantiate", c.ns, buildRequest), &buildv1.Build{}) + + if obj == nil { + return nil, err + } + return obj.(*buildv1.Build), err +} diff --git a/vendor/github.com/openshift/client-go/build/informers/externalversions/build/interface.go b/vendor/github.com/openshift/client-go/build/informers/externalversions/build/interface.go new file mode 100644 index 0000000000..01a651928a --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/informers/externalversions/build/interface.go @@ -0,0 +1,30 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package build + +import ( + v1 "github.com/openshift/client-go/build/informers/externalversions/build/v1" + internalinterfaces "github.com/openshift/client-go/build/informers/externalversions/internalinterfaces" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1 provides access to shared informers for resources in V1. 
+ V1() v1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1 returns a new v1.Interface. +func (g *group) V1() v1.Interface { + return v1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/github.com/openshift/client-go/build/informers/externalversions/build/v1/build.go b/vendor/github.com/openshift/client-go/build/informers/externalversions/build/v1/build.go new file mode 100644 index 0000000000..2055ed96f5 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/informers/externalversions/build/v1/build.go @@ -0,0 +1,74 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + buildv1 "github.com/openshift/api/build/v1" + versioned "github.com/openshift/client-go/build/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/build/informers/externalversions/internalinterfaces" + v1 "github.com/openshift/client-go/build/listers/build/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// BuildInformer provides access to a shared informer and lister for +// Builds. +type BuildInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.BuildLister +} + +type buildInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewBuildInformer constructs a new informer for Build type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewBuildInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredBuildInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredBuildInformer constructs a new informer for Build type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredBuildInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.BuildV1().Builds(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.BuildV1().Builds(namespace).Watch(context.TODO(), options) + }, + }, + &buildv1.Build{}, + resyncPeriod, + indexers, + ) +} + +func (f *buildInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredBuildInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *buildInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&buildv1.Build{}, f.defaultInformer) +} + +func (f *buildInformer) Lister() v1.BuildLister { + return v1.NewBuildLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/build/informers/externalversions/build/v1/buildconfig.go b/vendor/github.com/openshift/client-go/build/informers/externalversions/build/v1/buildconfig.go new file mode 100644 index 0000000000..28012f8c6d --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/informers/externalversions/build/v1/buildconfig.go @@ -0,0 +1,74 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + buildv1 "github.com/openshift/api/build/v1" + versioned "github.com/openshift/client-go/build/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/build/informers/externalversions/internalinterfaces" + v1 "github.com/openshift/client-go/build/listers/build/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// BuildConfigInformer provides access to a shared informer and lister for +// BuildConfigs. +type BuildConfigInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.BuildConfigLister +} + +type buildConfigInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewBuildConfigInformer constructs a new informer for BuildConfig type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewBuildConfigInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredBuildConfigInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredBuildConfigInformer constructs a new informer for BuildConfig type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredBuildConfigInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.BuildV1().BuildConfigs(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.BuildV1().BuildConfigs(namespace).Watch(context.TODO(), options) + }, + }, + &buildv1.BuildConfig{}, + resyncPeriod, + indexers, + ) +} + +func (f *buildConfigInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredBuildConfigInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *buildConfigInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&buildv1.BuildConfig{}, f.defaultInformer) +} + +func (f *buildConfigInformer) Lister() v1.BuildConfigLister { + return v1.NewBuildConfigLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/build/informers/externalversions/build/v1/interface.go b/vendor/github.com/openshift/client-go/build/informers/externalversions/build/v1/interface.go new file mode 100644 index 0000000000..da69fc9bb6 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/informers/externalversions/build/v1/interface.go @@ -0,0 +1,36 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + internalinterfaces "github.com/openshift/client-go/build/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // Builds returns a BuildInformer. + Builds() BuildInformer + // BuildConfigs returns a BuildConfigInformer. + BuildConfigs() BuildConfigInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// Builds returns a BuildInformer. +func (v *version) Builds() BuildInformer { + return &buildInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// BuildConfigs returns a BuildConfigInformer. +func (v *version) BuildConfigs() BuildConfigInformer { + return &buildConfigInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/github.com/openshift/client-go/build/informers/externalversions/factory.go b/vendor/github.com/openshift/client-go/build/informers/externalversions/factory.go new file mode 100644 index 0000000000..fadac908e0 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/informers/externalversions/factory.go @@ -0,0 +1,164 @@ +// Code generated by informer-gen. DO NOT EDIT. 
+ +package externalversions + +import ( + reflect "reflect" + sync "sync" + time "time" + + versioned "github.com/openshift/client-go/build/clientset/versioned" + build "github.com/openshift/client-go/build/informers/externalversions/build" + internalinterfaces "github.com/openshift/client-go/build/informers/externalversions/internalinterfaces" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// SharedInformerOption defines the functional option type for SharedInformerFactory. +type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory + +type sharedInformerFactory struct { + client versioned.Interface + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc + lock sync.Mutex + defaultResync time.Duration + customResync map[reflect.Type]time.Duration + + informers map[reflect.Type]cache.SharedIndexInformer + // startedInformers is used for tracking which informers have been started. + // This allows Start() to be called multiple times safely. + startedInformers map[reflect.Type]bool +} + +// WithCustomResyncConfig sets a custom resync period for the specified informer types. +func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + for k, v := range resyncConfig { + factory.customResync[reflect.TypeOf(k)] = v + } + return factory + } +} + +// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory. +func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.tweakListOptions = tweakListOptions + return factory + } +} + +// WithNamespace limits the SharedInformerFactory to the specified namespace. +func WithNamespace(namespace string) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.namespace = namespace + return factory + } +} + +// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. +func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync) +} + +// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory. +// Listers obtained via this SharedInformerFactory will be subject to the same filters +// as specified here. +// Deprecated: Please use NewSharedInformerFactoryWithOptions instead +func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions)) +} + +// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options. 
+func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory { + factory := &sharedInformerFactory{ + client: client, + namespace: v1.NamespaceAll, + defaultResync: defaultResync, + informers: make(map[reflect.Type]cache.SharedIndexInformer), + startedInformers: make(map[reflect.Type]bool), + customResync: make(map[reflect.Type]time.Duration), + } + + // Apply all options + for _, opt := range options { + factory = opt(factory) + } + + return factory +} + +// Start initializes all requested informers. +func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) { + f.lock.Lock() + defer f.lock.Unlock() + + for informerType, informer := range f.informers { + if !f.startedInformers[informerType] { + go informer.Run(stopCh) + f.startedInformers[informerType] = true + } + } +} + +// WaitForCacheSync waits for all started informers' cache were synced. +func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool { + informers := func() map[reflect.Type]cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informers := map[reflect.Type]cache.SharedIndexInformer{} + for informerType, informer := range f.informers { + if f.startedInformers[informerType] { + informers[informerType] = informer + } + } + return informers + }() + + res := map[reflect.Type]bool{} + for informType, informer := range informers { + res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced) + } + return res +} + +// InternalInformerFor returns the SharedIndexInformer for obj using an internal +// client. +func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informerType := reflect.TypeOf(obj) + informer, exists := f.informers[informerType] + if exists { + return informer + } + + resyncPeriod, exists := f.customResync[informerType] + if !exists { + resyncPeriod = f.defaultResync + } + + informer = newFunc(f.client, resyncPeriod) + f.informers[informerType] = informer + + return informer +} + +// SharedInformerFactory provides shared informers for resources in all known +// API group versions. +type SharedInformerFactory interface { + internalinterfaces.SharedInformerFactory + ForResource(resource schema.GroupVersionResource) (GenericInformer, error) + WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool + + Build() build.Interface +} + +func (f *sharedInformerFactory) Build() build.Interface { + return build.New(f, f.namespace, f.tweakListOptions) +} diff --git a/vendor/github.com/openshift/client-go/build/informers/externalversions/generic.go b/vendor/github.com/openshift/client-go/build/informers/externalversions/generic.go new file mode 100644 index 0000000000..e8b2035b70 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/informers/externalversions/generic.go @@ -0,0 +1,48 @@ +// Code generated by informer-gen. DO NOT EDIT. 
+ +package externalversions + +import ( + "fmt" + + v1 "github.com/openshift/api/build/v1" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// GenericInformer is type of SharedIndexInformer which will locate and delegate to other +// sharedInformers based on type +type GenericInformer interface { + Informer() cache.SharedIndexInformer + Lister() cache.GenericLister +} + +type genericInformer struct { + informer cache.SharedIndexInformer + resource schema.GroupResource +} + +// Informer returns the SharedIndexInformer. +func (f *genericInformer) Informer() cache.SharedIndexInformer { + return f.informer +} + +// Lister returns the GenericLister. +func (f *genericInformer) Lister() cache.GenericLister { + return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource) +} + +// ForResource gives generic access to a shared informer of the matching type +// TODO extend this to unknown resources with a client pool +func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { + switch resource { + // Group=build.openshift.io, Version=v1 + case v1.SchemeGroupVersion.WithResource("builds"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Build().V1().Builds().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("buildconfigs"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Build().V1().BuildConfigs().Informer()}, nil + + } + + return nil, fmt.Errorf("no informer found for %v", resource) +} diff --git a/vendor/github.com/openshift/client-go/build/informers/externalversions/internalinterfaces/factory_interfaces.go b/vendor/github.com/openshift/client-go/build/informers/externalversions/internalinterfaces/factory_interfaces.go new file mode 100644 index 0000000000..1bcbd5975a --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -0,0 +1,24 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package internalinterfaces + +import ( + time "time" + + versioned "github.com/openshift/client-go/build/clientset/versioned" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + cache "k8s.io/client-go/tools/cache" +) + +// NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer. +type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer + +// SharedInformerFactory a small interface to allow for adding an informer without an import cycle +type SharedInformerFactory interface { + Start(stopCh <-chan struct{}) + InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer +} + +// TweakListOptionsFunc is a function that transforms a v1.ListOptions. +type TweakListOptionsFunc func(*v1.ListOptions) diff --git a/vendor/github.com/openshift/client-go/build/listers/build/v1/build.go b/vendor/github.com/openshift/client-go/build/listers/build/v1/build.go new file mode 100644 index 0000000000..e072f9bac8 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/listers/build/v1/build.go @@ -0,0 +1,83 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/api/build/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// BuildLister helps list Builds. +// All objects returned here must be treated as read-only. 
+type BuildLister interface { + // List lists all Builds in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.Build, err error) + // Builds returns an object that can list and get Builds. + Builds(namespace string) BuildNamespaceLister + BuildListerExpansion +} + +// buildLister implements the BuildLister interface. +type buildLister struct { + indexer cache.Indexer +} + +// NewBuildLister returns a new BuildLister. +func NewBuildLister(indexer cache.Indexer) BuildLister { + return &buildLister{indexer: indexer} +} + +// List lists all Builds in the indexer. +func (s *buildLister) List(selector labels.Selector) (ret []*v1.Build, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Build)) + }) + return ret, err +} + +// Builds returns an object that can list and get Builds. +func (s *buildLister) Builds(namespace string) BuildNamespaceLister { + return buildNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// BuildNamespaceLister helps list and get Builds. +// All objects returned here must be treated as read-only. +type BuildNamespaceLister interface { + // List lists all Builds in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.Build, err error) + // Get retrieves the Build from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.Build, error) + BuildNamespaceListerExpansion +} + +// buildNamespaceLister implements the BuildNamespaceLister +// interface. +type buildNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Builds in the indexer for a given namespace. +func (s buildNamespaceLister) List(selector labels.Selector) (ret []*v1.Build, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Build)) + }) + return ret, err +} + +// Get retrieves the Build from the indexer for a given namespace and name. +func (s buildNamespaceLister) Get(name string) (*v1.Build, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("build"), name) + } + return obj.(*v1.Build), nil +} diff --git a/vendor/github.com/openshift/client-go/build/listers/build/v1/buildconfig.go b/vendor/github.com/openshift/client-go/build/listers/build/v1/buildconfig.go new file mode 100644 index 0000000000..d2bbdb4ec6 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/listers/build/v1/buildconfig.go @@ -0,0 +1,83 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/api/build/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// BuildConfigLister helps list BuildConfigs. +// All objects returned here must be treated as read-only. +type BuildConfigLister interface { + // List lists all BuildConfigs in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.BuildConfig, err error) + // BuildConfigs returns an object that can list and get BuildConfigs. + BuildConfigs(namespace string) BuildConfigNamespaceLister + BuildConfigListerExpansion +} + +// buildConfigLister implements the BuildConfigLister interface. 
+type buildConfigLister struct { + indexer cache.Indexer +} + +// NewBuildConfigLister returns a new BuildConfigLister. +func NewBuildConfigLister(indexer cache.Indexer) BuildConfigLister { + return &buildConfigLister{indexer: indexer} +} + +// List lists all BuildConfigs in the indexer. +func (s *buildConfigLister) List(selector labels.Selector) (ret []*v1.BuildConfig, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.BuildConfig)) + }) + return ret, err +} + +// BuildConfigs returns an object that can list and get BuildConfigs. +func (s *buildConfigLister) BuildConfigs(namespace string) BuildConfigNamespaceLister { + return buildConfigNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// BuildConfigNamespaceLister helps list and get BuildConfigs. +// All objects returned here must be treated as read-only. +type BuildConfigNamespaceLister interface { + // List lists all BuildConfigs in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.BuildConfig, err error) + // Get retrieves the BuildConfig from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.BuildConfig, error) + BuildConfigNamespaceListerExpansion +} + +// buildConfigNamespaceLister implements the BuildConfigNamespaceLister +// interface. +type buildConfigNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all BuildConfigs in the indexer for a given namespace. +func (s buildConfigNamespaceLister) List(selector labels.Selector) (ret []*v1.BuildConfig, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.BuildConfig)) + }) + return ret, err +} + +// Get retrieves the BuildConfig from the indexer for a given namespace and name. +func (s buildConfigNamespaceLister) Get(name string) (*v1.BuildConfig, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("buildconfig"), name) + } + return obj.(*v1.BuildConfig), nil +} diff --git a/vendor/github.com/openshift/client-go/build/listers/build/v1/expansion_generated.go b/vendor/github.com/openshift/client-go/build/listers/build/v1/expansion_generated.go new file mode 100644 index 0000000000..1fc9faecdd --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/listers/build/v1/expansion_generated.go @@ -0,0 +1,19 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +// BuildListerExpansion allows custom methods to be added to +// BuildLister. +type BuildListerExpansion interface{} + +// BuildNamespaceListerExpansion allows custom methods to be added to +// BuildNamespaceLister. +type BuildNamespaceListerExpansion interface{} + +// BuildConfigListerExpansion allows custom methods to be added to +// BuildConfigLister. +type BuildConfigListerExpansion interface{} + +// BuildConfigNamespaceListerExpansion allows custom methods to be added to +// BuildConfigNamespaceLister. 
+type BuildConfigNamespaceListerExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/clientset.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/clientset.go new file mode 100644 index 0000000000..b0ebcebf51 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/clientset.go @@ -0,0 +1,105 @@ +// Code generated by client-gen. DO NOT EDIT. + +package versioned + +import ( + "fmt" + "net/http" + + imagev1 "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1" + discovery "k8s.io/client-go/discovery" + rest "k8s.io/client-go/rest" + flowcontrol "k8s.io/client-go/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + ImageV1() imagev1.ImageV1Interface +} + +// Clientset contains the clients for groups. Each group has exactly one +// version included in a Clientset. +type Clientset struct { + *discovery.DiscoveryClient + imageV1 *imagev1.ImageV1Client +} + +// ImageV1 retrieves the ImageV1Client +func (c *Clientset) ImageV1() imagev1.ImageV1Interface { + return c.imageV1 +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + if c == nil { + return nil + } + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfig will generate a rate-limiter in configShallowCopy. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*Clientset, error) { + configShallowCopy := *c + + if configShallowCopy.UserAgent == "" { + configShallowCopy.UserAgent = rest.DefaultKubernetesUserAgent() + } + + // share the transport between all clients + httpClient, err := rest.HTTPClientFor(&configShallowCopy) + if err != nil { + return nil, err + } + + return NewForConfigAndClient(&configShallowCopy, httpClient) +} + +// NewForConfigAndClient creates a new Clientset for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfigAndClient will generate a rate-limiter in configShallowCopy. +func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + if configShallowCopy.Burst <= 0 { + return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") + } + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + + var cs Clientset + var err error + cs.imageV1, err = imagev1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + return &cs, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *Clientset { + cs, err := NewForConfig(c) + if err != nil { + panic(err) + } + return cs +} + +// New creates a new Clientset for the given RESTClient. 
+func New(c rest.Interface) *Clientset { + var cs Clientset + cs.imageV1 = imagev1.New(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &cs +} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/doc.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/doc.go new file mode 100644 index 0000000000..0e0c2a8900 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated clientset. +package versioned diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/fake/clientset_generated.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/fake/clientset_generated.go new file mode 100644 index 0000000000..dfb57b4e10 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/fake/clientset_generated.go @@ -0,0 +1,69 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + clientset "github.com/openshift/client-go/image/clientset/versioned" + imagev1 "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1" + fakeimagev1 "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/discovery" + fakediscovery "k8s.io/client-go/discovery/fake" + "k8s.io/client-go/testing" +) + +// NewSimpleClientset returns a clientset that will respond with the provided objects. +// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any validations and/or defaults. It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. +func NewSimpleClientset(objects ...runtime.Object) *Clientset { + o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + cs := &Clientset{tracker: o} + cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} + cs.AddReactor("*", "*", testing.ObjectReaction(o)) + cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) + + return cs +} + +// Clientset implements clientset.Interface. Meant to be embedded into a +// struct to get a default implementation. This makes faking out just the method +// you want to test easier. 
+type Clientset struct { + testing.Fake + discovery *fakediscovery.FakeDiscovery + tracker testing.ObjectTracker +} + +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return c.discovery +} + +func (c *Clientset) Tracker() testing.ObjectTracker { + return c.tracker +} + +var ( + _ clientset.Interface = &Clientset{} + _ testing.FakeClient = &Clientset{} +) + +// ImageV1 retrieves the ImageV1Client +func (c *Clientset) ImageV1() imagev1.ImageV1Interface { + return &fakeimagev1.FakeImageV1{Fake: &c.Fake} +} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/fake/doc.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/fake/doc.go new file mode 100644 index 0000000000..3630ed1cd1 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/fake/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated fake clientset. +package fake diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/fake/register.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/fake/register.go new file mode 100644 index 0000000000..d7efdf27ee --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/fake/register.go @@ -0,0 +1,40 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + imagev1 "github.com/openshift/api/image/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var scheme = runtime.NewScheme() +var codecs = serializer.NewCodecFactory(scheme) + +var localSchemeBuilder = runtime.SchemeBuilder{ + imagev1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(scheme)) +} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/doc.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/doc.go new file mode 100644 index 0000000000..2b5ba4c8e4 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_image.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_image.go new file mode 100644 index 0000000000..c32387af93 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_image.go @@ -0,0 +1,130 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + imagev1 "github.com/openshift/api/image/v1" + applyconfigurationsimagev1 "github.com/openshift/client-go/image/applyconfigurations/image/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeImages implements ImageInterface +type FakeImages struct { + Fake *FakeImageV1 +} + +var imagesResource = schema.GroupVersionResource{Group: "image.openshift.io", Version: "v1", Resource: "images"} + +var imagesKind = schema.GroupVersionKind{Group: "image.openshift.io", Version: "v1", Kind: "Image"} + +// Get takes name of the image, and returns the corresponding image object, and an error if there is any. +func (c *FakeImages) Get(ctx context.Context, name string, options v1.GetOptions) (result *imagev1.Image, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(imagesResource, name), &imagev1.Image{}) + if obj == nil { + return nil, err + } + return obj.(*imagev1.Image), err +} + +// List takes label and field selectors, and returns the list of Images that match those selectors. +func (c *FakeImages) List(ctx context.Context, opts v1.ListOptions) (result *imagev1.ImageList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(imagesResource, imagesKind, opts), &imagev1.ImageList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &imagev1.ImageList{ListMeta: obj.(*imagev1.ImageList).ListMeta} + for _, item := range obj.(*imagev1.ImageList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested images. +func (c *FakeImages) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(imagesResource, opts)) +} + +// Create takes the representation of a image and creates it. Returns the server's representation of the image, and an error, if there is any. +func (c *FakeImages) Create(ctx context.Context, image *imagev1.Image, opts v1.CreateOptions) (result *imagev1.Image, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(imagesResource, image), &imagev1.Image{}) + if obj == nil { + return nil, err + } + return obj.(*imagev1.Image), err +} + +// Update takes the representation of a image and updates it. Returns the server's representation of the image, and an error, if there is any. +func (c *FakeImages) Update(ctx context.Context, image *imagev1.Image, opts v1.UpdateOptions) (result *imagev1.Image, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(imagesResource, image), &imagev1.Image{}) + if obj == nil { + return nil, err + } + return obj.(*imagev1.Image), err +} + +// Delete takes name of the image and deletes it. Returns an error if one occurs. +func (c *FakeImages) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(imagesResource, name, opts), &imagev1.Image{}) + return err +} + +// DeleteCollection deletes a collection of objects. 
+func (c *FakeImages) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(imagesResource, listOpts) + + _, err := c.Fake.Invokes(action, &imagev1.ImageList{}) + return err +} + +// Patch applies the patch and returns the patched image. +func (c *FakeImages) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *imagev1.Image, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(imagesResource, name, pt, data, subresources...), &imagev1.Image{}) + if obj == nil { + return nil, err + } + return obj.(*imagev1.Image), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied image. +func (c *FakeImages) Apply(ctx context.Context, image *applyconfigurationsimagev1.ImageApplyConfiguration, opts v1.ApplyOptions) (result *imagev1.Image, err error) { + if image == nil { + return nil, fmt.Errorf("image provided to Apply must not be nil") + } + data, err := json.Marshal(image) + if err != nil { + return nil, err + } + name := image.Name + if name == nil { + return nil, fmt.Errorf("image.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(imagesResource, *name, types.ApplyPatchType, data), &imagev1.Image{}) + if obj == nil { + return nil, err + } + return obj.(*imagev1.Image), err +} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_image_client.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_image_client.go new file mode 100644 index 0000000000..c135a79bb6 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_image_client.go @@ -0,0 +1,52 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1 "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeImageV1 struct { + *testing.Fake +} + +func (c *FakeImageV1) Images() v1.ImageInterface { + return &FakeImages{c} +} + +func (c *FakeImageV1) ImageSignatures() v1.ImageSignatureInterface { + return &FakeImageSignatures{c} +} + +func (c *FakeImageV1) ImageStreams(namespace string) v1.ImageStreamInterface { + return &FakeImageStreams{c, namespace} +} + +func (c *FakeImageV1) ImageStreamImages(namespace string) v1.ImageStreamImageInterface { + return &FakeImageStreamImages{c, namespace} +} + +func (c *FakeImageV1) ImageStreamImports(namespace string) v1.ImageStreamImportInterface { + return &FakeImageStreamImports{c, namespace} +} + +func (c *FakeImageV1) ImageStreamMappings(namespace string) v1.ImageStreamMappingInterface { + return &FakeImageStreamMappings{c, namespace} +} + +func (c *FakeImageV1) ImageStreamTags(namespace string) v1.ImageStreamTagInterface { + return &FakeImageStreamTags{c, namespace} +} + +func (c *FakeImageV1) ImageTags(namespace string) v1.ImageTagInterface { + return &FakeImageTags{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *FakeImageV1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagesignature.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagesignature.go new file mode 100644 index 0000000000..0ff22e2fd2 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagesignature.go @@ -0,0 +1,38 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v1 "github.com/openshift/api/image/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + schema "k8s.io/apimachinery/pkg/runtime/schema" + testing "k8s.io/client-go/testing" +) + +// FakeImageSignatures implements ImageSignatureInterface +type FakeImageSignatures struct { + Fake *FakeImageV1 +} + +var imagesignaturesResource = schema.GroupVersionResource{Group: "image.openshift.io", Version: "v1", Resource: "imagesignatures"} + +var imagesignaturesKind = schema.GroupVersionKind{Group: "image.openshift.io", Version: "v1", Kind: "ImageSignature"} + +// Create takes the representation of a imageSignature and creates it. Returns the server's representation of the imageSignature, and an error, if there is any. +func (c *FakeImageSignatures) Create(ctx context.Context, imageSignature *v1.ImageSignature, opts metav1.CreateOptions) (result *v1.ImageSignature, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(imagesignaturesResource, imageSignature), &v1.ImageSignature{}) + if obj == nil { + return nil, err + } + return obj.(*v1.ImageSignature), err +} + +// Delete takes name of the imageSignature and deletes it. Returns an error if one occurs. +func (c *FakeImageSignatures) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(imagesignaturesResource, name, opts), &v1.ImageSignature{}) + return err +} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestream.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestream.go new file mode 100644 index 0000000000..7db6c8a822 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestream.go @@ -0,0 +1,196 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + imagev1 "github.com/openshift/api/image/v1" + applyconfigurationsimagev1 "github.com/openshift/client-go/image/applyconfigurations/image/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeImageStreams implements ImageStreamInterface +type FakeImageStreams struct { + Fake *FakeImageV1 + ns string +} + +var imagestreamsResource = schema.GroupVersionResource{Group: "image.openshift.io", Version: "v1", Resource: "imagestreams"} + +var imagestreamsKind = schema.GroupVersionKind{Group: "image.openshift.io", Version: "v1", Kind: "ImageStream"} + +// Get takes name of the imageStream, and returns the corresponding imageStream object, and an error if there is any. 
+func (c *FakeImageStreams) Get(ctx context.Context, name string, options v1.GetOptions) (result *imagev1.ImageStream, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(imagestreamsResource, c.ns, name), &imagev1.ImageStream{}) + + if obj == nil { + return nil, err + } + return obj.(*imagev1.ImageStream), err +} + +// List takes label and field selectors, and returns the list of ImageStreams that match those selectors. +func (c *FakeImageStreams) List(ctx context.Context, opts v1.ListOptions) (result *imagev1.ImageStreamList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(imagestreamsResource, imagestreamsKind, c.ns, opts), &imagev1.ImageStreamList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &imagev1.ImageStreamList{ListMeta: obj.(*imagev1.ImageStreamList).ListMeta} + for _, item := range obj.(*imagev1.ImageStreamList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested imageStreams. +func (c *FakeImageStreams) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(imagestreamsResource, c.ns, opts)) + +} + +// Create takes the representation of a imageStream and creates it. Returns the server's representation of the imageStream, and an error, if there is any. +func (c *FakeImageStreams) Create(ctx context.Context, imageStream *imagev1.ImageStream, opts v1.CreateOptions) (result *imagev1.ImageStream, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(imagestreamsResource, c.ns, imageStream), &imagev1.ImageStream{}) + + if obj == nil { + return nil, err + } + return obj.(*imagev1.ImageStream), err +} + +// Update takes the representation of a imageStream and updates it. Returns the server's representation of the imageStream, and an error, if there is any. +func (c *FakeImageStreams) Update(ctx context.Context, imageStream *imagev1.ImageStream, opts v1.UpdateOptions) (result *imagev1.ImageStream, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(imagestreamsResource, c.ns, imageStream), &imagev1.ImageStream{}) + + if obj == nil { + return nil, err + } + return obj.(*imagev1.ImageStream), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeImageStreams) UpdateStatus(ctx context.Context, imageStream *imagev1.ImageStream, opts v1.UpdateOptions) (*imagev1.ImageStream, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(imagestreamsResource, "status", c.ns, imageStream), &imagev1.ImageStream{}) + + if obj == nil { + return nil, err + } + return obj.(*imagev1.ImageStream), err +} + +// Delete takes name of the imageStream and deletes it. Returns an error if one occurs. +func (c *FakeImageStreams) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(imagestreamsResource, c.ns, name, opts), &imagev1.ImageStream{}) + + return err +} + +// DeleteCollection deletes a collection of objects. 
+func (c *FakeImageStreams) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(imagestreamsResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &imagev1.ImageStreamList{}) + return err +} + +// Patch applies the patch and returns the patched imageStream. +func (c *FakeImageStreams) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *imagev1.ImageStream, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(imagestreamsResource, c.ns, name, pt, data, subresources...), &imagev1.ImageStream{}) + + if obj == nil { + return nil, err + } + return obj.(*imagev1.ImageStream), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied imageStream. +func (c *FakeImageStreams) Apply(ctx context.Context, imageStream *applyconfigurationsimagev1.ImageStreamApplyConfiguration, opts v1.ApplyOptions) (result *imagev1.ImageStream, err error) { + if imageStream == nil { + return nil, fmt.Errorf("imageStream provided to Apply must not be nil") + } + data, err := json.Marshal(imageStream) + if err != nil { + return nil, err + } + name := imageStream.Name + if name == nil { + return nil, fmt.Errorf("imageStream.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(imagestreamsResource, c.ns, *name, types.ApplyPatchType, data), &imagev1.ImageStream{}) + + if obj == nil { + return nil, err + } + return obj.(*imagev1.ImageStream), err +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *FakeImageStreams) ApplyStatus(ctx context.Context, imageStream *applyconfigurationsimagev1.ImageStreamApplyConfiguration, opts v1.ApplyOptions) (result *imagev1.ImageStream, err error) { + if imageStream == nil { + return nil, fmt.Errorf("imageStream provided to Apply must not be nil") + } + data, err := json.Marshal(imageStream) + if err != nil { + return nil, err + } + name := imageStream.Name + if name == nil { + return nil, fmt.Errorf("imageStream.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(imagestreamsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &imagev1.ImageStream{}) + + if obj == nil { + return nil, err + } + return obj.(*imagev1.ImageStream), err +} + +// Secrets takes name of the imageStream, and returns the corresponding secretList object, and an error if there is any. +func (c *FakeImageStreams) Secrets(ctx context.Context, imageStreamName string, options v1.GetOptions) (result *imagev1.SecretList, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetSubresourceAction(imagestreamsResource, c.ns, "secrets", imageStreamName), &imagev1.SecretList{}) + + if obj == nil { + return nil, err + } + return obj.(*imagev1.SecretList), err +} + +// Layers takes name of the imageStream, and returns the corresponding imageStreamLayers object, and an error if there is any. +func (c *FakeImageStreams) Layers(ctx context.Context, imageStreamName string, options v1.GetOptions) (result *imagev1.ImageStreamLayers, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewGetSubresourceAction(imagestreamsResource, c.ns, "layers", imageStreamName), &imagev1.ImageStreamLayers{}) + + if obj == nil { + return nil, err + } + return obj.(*imagev1.ImageStreamLayers), err +} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestreamimage.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestreamimage.go new file mode 100644 index 0000000000..aa97914259 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestreamimage.go @@ -0,0 +1,33 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + imagev1 "github.com/openshift/api/image/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + schema "k8s.io/apimachinery/pkg/runtime/schema" + testing "k8s.io/client-go/testing" +) + +// FakeImageStreamImages implements ImageStreamImageInterface +type FakeImageStreamImages struct { + Fake *FakeImageV1 + ns string +} + +var imagestreamimagesResource = schema.GroupVersionResource{Group: "image.openshift.io", Version: "v1", Resource: "imagestreamimages"} + +var imagestreamimagesKind = schema.GroupVersionKind{Group: "image.openshift.io", Version: "v1", Kind: "ImageStreamImage"} + +// Get takes name of the imageStreamImage, and returns the corresponding imageStreamImage object, and an error if there is any. +func (c *FakeImageStreamImages) Get(ctx context.Context, name string, options v1.GetOptions) (result *imagev1.ImageStreamImage, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(imagestreamimagesResource, c.ns, name), &imagev1.ImageStreamImage{}) + + if obj == nil { + return nil, err + } + return obj.(*imagev1.ImageStreamImage), err +} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestreamimport.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestreamimport.go new file mode 100644 index 0000000000..5e7e5da326 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestreamimport.go @@ -0,0 +1,33 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v1 "github.com/openshift/api/image/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + schema "k8s.io/apimachinery/pkg/runtime/schema" + testing "k8s.io/client-go/testing" +) + +// FakeImageStreamImports implements ImageStreamImportInterface +type FakeImageStreamImports struct { + Fake *FakeImageV1 + ns string +} + +var imagestreamimportsResource = schema.GroupVersionResource{Group: "image.openshift.io", Version: "v1", Resource: "imagestreamimports"} + +var imagestreamimportsKind = schema.GroupVersionKind{Group: "image.openshift.io", Version: "v1", Kind: "ImageStreamImport"} + +// Create takes the representation of a imageStreamImport and creates it. Returns the server's representation of the imageStreamImport, and an error, if there is any. +func (c *FakeImageStreamImports) Create(ctx context.Context, imageStreamImport *v1.ImageStreamImport, opts metav1.CreateOptions) (result *v1.ImageStreamImport, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewCreateAction(imagestreamimportsResource, c.ns, imageStreamImport), &v1.ImageStreamImport{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.ImageStreamImport), err +} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestreammapping.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestreammapping.go new file mode 100644 index 0000000000..d50ddbd1ed --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestreammapping.go @@ -0,0 +1,59 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + imagev1 "github.com/openshift/api/image/v1" + v1 "github.com/openshift/client-go/image/applyconfigurations/image/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + testing "k8s.io/client-go/testing" +) + +// FakeImageStreamMappings implements ImageStreamMappingInterface +type FakeImageStreamMappings struct { + Fake *FakeImageV1 + ns string +} + +var imagestreammappingsResource = schema.GroupVersionResource{Group: "image.openshift.io", Version: "v1", Resource: "imagestreammappings"} + +var imagestreammappingsKind = schema.GroupVersionKind{Group: "image.openshift.io", Version: "v1", Kind: "ImageStreamMapping"} + +// Apply takes the given apply declarative configuration, applies it and returns the applied imageStreamMapping. +func (c *FakeImageStreamMappings) Apply(ctx context.Context, imageStreamMapping *v1.ImageStreamMappingApplyConfiguration, opts metav1.ApplyOptions) (result *imagev1.ImageStreamMapping, err error) { + if imageStreamMapping == nil { + return nil, fmt.Errorf("imageStreamMapping provided to Apply must not be nil") + } + data, err := json.Marshal(imageStreamMapping) + if err != nil { + return nil, err + } + name := imageStreamMapping.Name + if name == nil { + return nil, fmt.Errorf("imageStreamMapping.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(imagestreammappingsResource, c.ns, *name, types.ApplyPatchType, data), &imagev1.ImageStreamMapping{}) + + if obj == nil { + return nil, err + } + return obj.(*imagev1.ImageStreamMapping), err +} + +// Create takes the representation of a imageStreamMapping and creates it. Returns the server's representation of the status, and an error, if there is any. +func (c *FakeImageStreamMappings) Create(ctx context.Context, imageStreamMapping *imagev1.ImageStreamMapping, opts metav1.CreateOptions) (result *metav1.Status, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(imagestreammappingsResource, c.ns, imageStreamMapping), &metav1.Status{}) + + if obj == nil { + return nil, err + } + return obj.(*metav1.Status), err +} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestreamtag.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestreamtag.go new file mode 100644 index 0000000000..0befdecac1 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestreamtag.go @@ -0,0 +1,86 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + + imagev1 "github.com/openshift/api/image/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + testing "k8s.io/client-go/testing" +) + +// FakeImageStreamTags implements ImageStreamTagInterface +type FakeImageStreamTags struct { + Fake *FakeImageV1 + ns string +} + +var imagestreamtagsResource = schema.GroupVersionResource{Group: "image.openshift.io", Version: "v1", Resource: "imagestreamtags"} + +var imagestreamtagsKind = schema.GroupVersionKind{Group: "image.openshift.io", Version: "v1", Kind: "ImageStreamTag"} + +// Get takes name of the imageStreamTag, and returns the corresponding imageStreamTag object, and an error if there is any. +func (c *FakeImageStreamTags) Get(ctx context.Context, name string, options v1.GetOptions) (result *imagev1.ImageStreamTag, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(imagestreamtagsResource, c.ns, name), &imagev1.ImageStreamTag{}) + + if obj == nil { + return nil, err + } + return obj.(*imagev1.ImageStreamTag), err +} + +// List takes label and field selectors, and returns the list of ImageStreamTags that match those selectors. +func (c *FakeImageStreamTags) List(ctx context.Context, opts v1.ListOptions) (result *imagev1.ImageStreamTagList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(imagestreamtagsResource, imagestreamtagsKind, c.ns, opts), &imagev1.ImageStreamTagList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &imagev1.ImageStreamTagList{ListMeta: obj.(*imagev1.ImageStreamTagList).ListMeta} + for _, item := range obj.(*imagev1.ImageStreamTagList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Create takes the representation of a imageStreamTag and creates it. Returns the server's representation of the imageStreamTag, and an error, if there is any. +func (c *FakeImageStreamTags) Create(ctx context.Context, imageStreamTag *imagev1.ImageStreamTag, opts v1.CreateOptions) (result *imagev1.ImageStreamTag, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(imagestreamtagsResource, c.ns, imageStreamTag), &imagev1.ImageStreamTag{}) + + if obj == nil { + return nil, err + } + return obj.(*imagev1.ImageStreamTag), err +} + +// Update takes the representation of a imageStreamTag and updates it. Returns the server's representation of the imageStreamTag, and an error, if there is any. +func (c *FakeImageStreamTags) Update(ctx context.Context, imageStreamTag *imagev1.ImageStreamTag, opts v1.UpdateOptions) (result *imagev1.ImageStreamTag, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(imagestreamtagsResource, c.ns, imageStreamTag), &imagev1.ImageStreamTag{}) + + if obj == nil { + return nil, err + } + return obj.(*imagev1.ImageStreamTag), err +} + +// Delete takes name of the imageStreamTag and deletes it. Returns an error if one occurs. +func (c *FakeImageStreamTags) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. 
+ Invokes(testing.NewDeleteActionWithOptions(imagestreamtagsResource, c.ns, name, opts), &imagev1.ImageStreamTag{}) + + return err +} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagetag.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagetag.go new file mode 100644 index 0000000000..6bf41d7d97 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagetag.go @@ -0,0 +1,86 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + imagev1 "github.com/openshift/api/image/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + testing "k8s.io/client-go/testing" +) + +// FakeImageTags implements ImageTagInterface +type FakeImageTags struct { + Fake *FakeImageV1 + ns string +} + +var imagetagsResource = schema.GroupVersionResource{Group: "image.openshift.io", Version: "v1", Resource: "imagetags"} + +var imagetagsKind = schema.GroupVersionKind{Group: "image.openshift.io", Version: "v1", Kind: "ImageTag"} + +// Get takes name of the imageTag, and returns the corresponding imageTag object, and an error if there is any. +func (c *FakeImageTags) Get(ctx context.Context, name string, options v1.GetOptions) (result *imagev1.ImageTag, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(imagetagsResource, c.ns, name), &imagev1.ImageTag{}) + + if obj == nil { + return nil, err + } + return obj.(*imagev1.ImageTag), err +} + +// List takes label and field selectors, and returns the list of ImageTags that match those selectors. +func (c *FakeImageTags) List(ctx context.Context, opts v1.ListOptions) (result *imagev1.ImageTagList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(imagetagsResource, imagetagsKind, c.ns, opts), &imagev1.ImageTagList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &imagev1.ImageTagList{ListMeta: obj.(*imagev1.ImageTagList).ListMeta} + for _, item := range obj.(*imagev1.ImageTagList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Create takes the representation of a imageTag and creates it. Returns the server's representation of the imageTag, and an error, if there is any. +func (c *FakeImageTags) Create(ctx context.Context, imageTag *imagev1.ImageTag, opts v1.CreateOptions) (result *imagev1.ImageTag, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(imagetagsResource, c.ns, imageTag), &imagev1.ImageTag{}) + + if obj == nil { + return nil, err + } + return obj.(*imagev1.ImageTag), err +} + +// Update takes the representation of a imageTag and updates it. Returns the server's representation of the imageTag, and an error, if there is any. +func (c *FakeImageTags) Update(ctx context.Context, imageTag *imagev1.ImageTag, opts v1.UpdateOptions) (result *imagev1.ImageTag, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(imagetagsResource, c.ns, imageTag), &imagev1.ImageTag{}) + + if obj == nil { + return nil, err + } + return obj.(*imagev1.ImageTag), err +} + +// Delete takes name of the imageTag and deletes it. Returns an error if one occurs. 
+func (c *FakeImageTags) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(imagetagsResource, c.ns, name, opts), &imagev1.ImageTag{}) + + return err +} diff --git a/vendor/github.com/openshift/client-go/image/informers/externalversions/factory.go b/vendor/github.com/openshift/client-go/image/informers/externalversions/factory.go new file mode 100644 index 0000000000..067795180f --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/informers/externalversions/factory.go @@ -0,0 +1,164 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package externalversions + +import ( + reflect "reflect" + sync "sync" + time "time" + + versioned "github.com/openshift/client-go/image/clientset/versioned" + image "github.com/openshift/client-go/image/informers/externalversions/image" + internalinterfaces "github.com/openshift/client-go/image/informers/externalversions/internalinterfaces" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// SharedInformerOption defines the functional option type for SharedInformerFactory. +type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory + +type sharedInformerFactory struct { + client versioned.Interface + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc + lock sync.Mutex + defaultResync time.Duration + customResync map[reflect.Type]time.Duration + + informers map[reflect.Type]cache.SharedIndexInformer + // startedInformers is used for tracking which informers have been started. + // This allows Start() to be called multiple times safely. + startedInformers map[reflect.Type]bool +} + +// WithCustomResyncConfig sets a custom resync period for the specified informer types. +func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + for k, v := range resyncConfig { + factory.customResync[reflect.TypeOf(k)] = v + } + return factory + } +} + +// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory. +func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.tweakListOptions = tweakListOptions + return factory + } +} + +// WithNamespace limits the SharedInformerFactory to the specified namespace. +func WithNamespace(namespace string) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.namespace = namespace + return factory + } +} + +// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. +func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync) +} + +// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory. +// Listers obtained via this SharedInformerFactory will be subject to the same filters +// as specified here. 
+// Deprecated: Please use NewSharedInformerFactoryWithOptions instead +func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions)) +} + +// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options. +func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory { + factory := &sharedInformerFactory{ + client: client, + namespace: v1.NamespaceAll, + defaultResync: defaultResync, + informers: make(map[reflect.Type]cache.SharedIndexInformer), + startedInformers: make(map[reflect.Type]bool), + customResync: make(map[reflect.Type]time.Duration), + } + + // Apply all options + for _, opt := range options { + factory = opt(factory) + } + + return factory +} + +// Start initializes all requested informers. +func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) { + f.lock.Lock() + defer f.lock.Unlock() + + for informerType, informer := range f.informers { + if !f.startedInformers[informerType] { + go informer.Run(stopCh) + f.startedInformers[informerType] = true + } + } +} + +// WaitForCacheSync waits for all started informers' cache were synced. +func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool { + informers := func() map[reflect.Type]cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informers := map[reflect.Type]cache.SharedIndexInformer{} + for informerType, informer := range f.informers { + if f.startedInformers[informerType] { + informers[informerType] = informer + } + } + return informers + }() + + res := map[reflect.Type]bool{} + for informType, informer := range informers { + res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced) + } + return res +} + +// InternalInformerFor returns the SharedIndexInformer for obj using an internal +// client. +func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informerType := reflect.TypeOf(obj) + informer, exists := f.informers[informerType] + if exists { + return informer + } + + resyncPeriod, exists := f.customResync[informerType] + if !exists { + resyncPeriod = f.defaultResync + } + + informer = newFunc(f.client, resyncPeriod) + f.informers[informerType] = informer + + return informer +} + +// SharedInformerFactory provides shared informers for resources in all known +// API group versions. 
+type SharedInformerFactory interface { + internalinterfaces.SharedInformerFactory + ForResource(resource schema.GroupVersionResource) (GenericInformer, error) + WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool + + Image() image.Interface +} + +func (f *sharedInformerFactory) Image() image.Interface { + return image.New(f, f.namespace, f.tweakListOptions) +} diff --git a/vendor/github.com/openshift/client-go/image/informers/externalversions/generic.go b/vendor/github.com/openshift/client-go/image/informers/externalversions/generic.go new file mode 100644 index 0000000000..55f59dedef --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/informers/externalversions/generic.go @@ -0,0 +1,48 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package externalversions + +import ( + "fmt" + + v1 "github.com/openshift/api/image/v1" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// GenericInformer is type of SharedIndexInformer which will locate and delegate to other +// sharedInformers based on type +type GenericInformer interface { + Informer() cache.SharedIndexInformer + Lister() cache.GenericLister +} + +type genericInformer struct { + informer cache.SharedIndexInformer + resource schema.GroupResource +} + +// Informer returns the SharedIndexInformer. +func (f *genericInformer) Informer() cache.SharedIndexInformer { + return f.informer +} + +// Lister returns the GenericLister. +func (f *genericInformer) Lister() cache.GenericLister { + return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource) +} + +// ForResource gives generic access to a shared informer of the matching type +// TODO extend this to unknown resources with a client pool +func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { + switch resource { + // Group=image.openshift.io, Version=v1 + case v1.SchemeGroupVersion.WithResource("images"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Image().V1().Images().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("imagestreams"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Image().V1().ImageStreams().Informer()}, nil + + } + + return nil, fmt.Errorf("no informer found for %v", resource) +} diff --git a/vendor/github.com/openshift/client-go/image/informers/externalversions/image/interface.go b/vendor/github.com/openshift/client-go/image/informers/externalversions/image/interface.go new file mode 100644 index 0000000000..092550ed3d --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/informers/externalversions/image/interface.go @@ -0,0 +1,30 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package image + +import ( + v1 "github.com/openshift/client-go/image/informers/externalversions/image/v1" + internalinterfaces "github.com/openshift/client-go/image/informers/externalversions/internalinterfaces" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1 provides access to shared informers for resources in V1. + V1() v1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. 
+func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1 returns a new v1.Interface. +func (g *group) V1() v1.Interface { + return v1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/github.com/openshift/client-go/image/informers/externalversions/image/v1/image.go b/vendor/github.com/openshift/client-go/image/informers/externalversions/image/v1/image.go new file mode 100644 index 0000000000..ee2d0a7067 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/informers/externalversions/image/v1/image.go @@ -0,0 +1,73 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + imagev1 "github.com/openshift/api/image/v1" + versioned "github.com/openshift/client-go/image/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/image/informers/externalversions/internalinterfaces" + v1 "github.com/openshift/client-go/image/listers/image/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// ImageInformer provides access to a shared informer and lister for +// Images. +type ImageInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ImageLister +} + +type imageInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewImageInformer constructs a new informer for Image type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewImageInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredImageInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredImageInformer constructs a new informer for Image type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredImageInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ImageV1().Images().List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ImageV1().Images().Watch(context.TODO(), options) + }, + }, + &imagev1.Image{}, + resyncPeriod, + indexers, + ) +} + +func (f *imageInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredImageInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *imageInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&imagev1.Image{}, f.defaultInformer) +} + +func (f *imageInformer) Lister() v1.ImageLister { + return v1.NewImageLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/image/informers/externalversions/image/v1/imagestream.go b/vendor/github.com/openshift/client-go/image/informers/externalversions/image/v1/imagestream.go new file mode 100644 index 0000000000..4a94cc5c7d --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/informers/externalversions/image/v1/imagestream.go @@ -0,0 +1,74 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + imagev1 "github.com/openshift/api/image/v1" + versioned "github.com/openshift/client-go/image/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/image/informers/externalversions/internalinterfaces" + v1 "github.com/openshift/client-go/image/listers/image/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// ImageStreamInformer provides access to a shared informer and lister for +// ImageStreams. +type ImageStreamInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ImageStreamLister +} + +type imageStreamInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewImageStreamInformer constructs a new informer for ImageStream type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewImageStreamInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredImageStreamInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredImageStreamInformer constructs a new informer for ImageStream type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredImageStreamInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ImageV1().ImageStreams(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ImageV1().ImageStreams(namespace).Watch(context.TODO(), options) + }, + }, + &imagev1.ImageStream{}, + resyncPeriod, + indexers, + ) +} + +func (f *imageStreamInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredImageStreamInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *imageStreamInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&imagev1.ImageStream{}, f.defaultInformer) +} + +func (f *imageStreamInformer) Lister() v1.ImageStreamLister { + return v1.NewImageStreamLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/image/informers/externalversions/image/v1/interface.go b/vendor/github.com/openshift/client-go/image/informers/externalversions/image/v1/interface.go new file mode 100644 index 0000000000..fd35c4df1a --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/informers/externalversions/image/v1/interface.go @@ -0,0 +1,36 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + internalinterfaces "github.com/openshift/client-go/image/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // Images returns a ImageInformer. + Images() ImageInformer + // ImageStreams returns a ImageStreamInformer. + ImageStreams() ImageStreamInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// Images returns a ImageInformer. +func (v *version) Images() ImageInformer { + return &imageInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// ImageStreams returns a ImageStreamInformer. +func (v *version) ImageStreams() ImageStreamInformer { + return &imageStreamInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/github.com/openshift/client-go/image/informers/externalversions/internalinterfaces/factory_interfaces.go b/vendor/github.com/openshift/client-go/image/informers/externalversions/internalinterfaces/factory_interfaces.go new file mode 100644 index 0000000000..c35dcbfa44 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -0,0 +1,24 @@ +// Code generated by informer-gen. DO NOT EDIT. 
+ +package internalinterfaces + +import ( + time "time" + + versioned "github.com/openshift/client-go/image/clientset/versioned" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + cache "k8s.io/client-go/tools/cache" +) + +// NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer. +type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer + +// SharedInformerFactory a small interface to allow for adding an informer without an import cycle +type SharedInformerFactory interface { + Start(stopCh <-chan struct{}) + InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer +} + +// TweakListOptionsFunc is a function that transforms a v1.ListOptions. +type TweakListOptionsFunc func(*v1.ListOptions) diff --git a/vendor/github.com/openshift/client-go/image/listers/image/v1/expansion_generated.go b/vendor/github.com/openshift/client-go/image/listers/image/v1/expansion_generated.go new file mode 100644 index 0000000000..308b6db702 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/listers/image/v1/expansion_generated.go @@ -0,0 +1,31 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +// ImageListerExpansion allows custom methods to be added to +// ImageLister. +type ImageListerExpansion interface{} + +// ImageStreamListerExpansion allows custom methods to be added to +// ImageStreamLister. +type ImageStreamListerExpansion interface{} + +// ImageStreamNamespaceListerExpansion allows custom methods to be added to +// ImageStreamNamespaceLister. +type ImageStreamNamespaceListerExpansion interface{} + +// ImageStreamTagListerExpansion allows custom methods to be added to +// ImageStreamTagLister. +type ImageStreamTagListerExpansion interface{} + +// ImageStreamTagNamespaceListerExpansion allows custom methods to be added to +// ImageStreamTagNamespaceLister. +type ImageStreamTagNamespaceListerExpansion interface{} + +// ImageTagListerExpansion allows custom methods to be added to +// ImageTagLister. +type ImageTagListerExpansion interface{} + +// ImageTagNamespaceListerExpansion allows custom methods to be added to +// ImageTagNamespaceLister. +type ImageTagNamespaceListerExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/image/listers/image/v1/image.go b/vendor/github.com/openshift/client-go/image/listers/image/v1/image.go new file mode 100644 index 0000000000..bb66460a77 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/listers/image/v1/image.go @@ -0,0 +1,52 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/api/image/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ImageLister helps list Images. +// All objects returned here must be treated as read-only. +type ImageLister interface { + // List lists all Images in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.Image, err error) + // Get retrieves the Image from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.Image, error) + ImageListerExpansion +} + +// imageLister implements the ImageLister interface. +type imageLister struct { + indexer cache.Indexer +} + +// NewImageLister returns a new ImageLister. 
+func NewImageLister(indexer cache.Indexer) ImageLister { + return &imageLister{indexer: indexer} +} + +// List lists all Images in the indexer. +func (s *imageLister) List(selector labels.Selector) (ret []*v1.Image, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Image)) + }) + return ret, err +} + +// Get retrieves the Image from the index for a given name. +func (s *imageLister) Get(name string) (*v1.Image, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("image"), name) + } + return obj.(*v1.Image), nil +} diff --git a/vendor/github.com/openshift/client-go/image/listers/image/v1/imagestream.go b/vendor/github.com/openshift/client-go/image/listers/image/v1/imagestream.go new file mode 100644 index 0000000000..02ed4da365 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/listers/image/v1/imagestream.go @@ -0,0 +1,83 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/api/image/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ImageStreamLister helps list ImageStreams. +// All objects returned here must be treated as read-only. +type ImageStreamLister interface { + // List lists all ImageStreams in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.ImageStream, err error) + // ImageStreams returns an object that can list and get ImageStreams. + ImageStreams(namespace string) ImageStreamNamespaceLister + ImageStreamListerExpansion +} + +// imageStreamLister implements the ImageStreamLister interface. +type imageStreamLister struct { + indexer cache.Indexer +} + +// NewImageStreamLister returns a new ImageStreamLister. +func NewImageStreamLister(indexer cache.Indexer) ImageStreamLister { + return &imageStreamLister{indexer: indexer} +} + +// List lists all ImageStreams in the indexer. +func (s *imageStreamLister) List(selector labels.Selector) (ret []*v1.ImageStream, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ImageStream)) + }) + return ret, err +} + +// ImageStreams returns an object that can list and get ImageStreams. +func (s *imageStreamLister) ImageStreams(namespace string) ImageStreamNamespaceLister { + return imageStreamNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ImageStreamNamespaceLister helps list and get ImageStreams. +// All objects returned here must be treated as read-only. +type ImageStreamNamespaceLister interface { + // List lists all ImageStreams in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.ImageStream, err error) + // Get retrieves the ImageStream from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.ImageStream, error) + ImageStreamNamespaceListerExpansion +} + +// imageStreamNamespaceLister implements the ImageStreamNamespaceLister +// interface. +type imageStreamNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all ImageStreams in the indexer for a given namespace. 
+func (s imageStreamNamespaceLister) List(selector labels.Selector) (ret []*v1.ImageStream, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ImageStream)) + }) + return ret, err +} + +// Get retrieves the ImageStream from the indexer for a given namespace and name. +func (s imageStreamNamespaceLister) Get(name string) (*v1.ImageStream, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("imagestream"), name) + } + return obj.(*v1.ImageStream), nil +} diff --git a/vendor/github.com/openshift/client-go/image/listers/image/v1/imagestreamtag.go b/vendor/github.com/openshift/client-go/image/listers/image/v1/imagestreamtag.go new file mode 100644 index 0000000000..6042b27bbe --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/listers/image/v1/imagestreamtag.go @@ -0,0 +1,83 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/api/image/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ImageStreamTagLister helps list ImageStreamTags. +// All objects returned here must be treated as read-only. +type ImageStreamTagLister interface { + // List lists all ImageStreamTags in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.ImageStreamTag, err error) + // ImageStreamTags returns an object that can list and get ImageStreamTags. + ImageStreamTags(namespace string) ImageStreamTagNamespaceLister + ImageStreamTagListerExpansion +} + +// imageStreamTagLister implements the ImageStreamTagLister interface. +type imageStreamTagLister struct { + indexer cache.Indexer +} + +// NewImageStreamTagLister returns a new ImageStreamTagLister. +func NewImageStreamTagLister(indexer cache.Indexer) ImageStreamTagLister { + return &imageStreamTagLister{indexer: indexer} +} + +// List lists all ImageStreamTags in the indexer. +func (s *imageStreamTagLister) List(selector labels.Selector) (ret []*v1.ImageStreamTag, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ImageStreamTag)) + }) + return ret, err +} + +// ImageStreamTags returns an object that can list and get ImageStreamTags. +func (s *imageStreamTagLister) ImageStreamTags(namespace string) ImageStreamTagNamespaceLister { + return imageStreamTagNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ImageStreamTagNamespaceLister helps list and get ImageStreamTags. +// All objects returned here must be treated as read-only. +type ImageStreamTagNamespaceLister interface { + // List lists all ImageStreamTags in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.ImageStreamTag, err error) + // Get retrieves the ImageStreamTag from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.ImageStreamTag, error) + ImageStreamTagNamespaceListerExpansion +} + +// imageStreamTagNamespaceLister implements the ImageStreamTagNamespaceLister +// interface. +type imageStreamTagNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all ImageStreamTags in the indexer for a given namespace. 
+func (s imageStreamTagNamespaceLister) List(selector labels.Selector) (ret []*v1.ImageStreamTag, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ImageStreamTag)) + }) + return ret, err +} + +// Get retrieves the ImageStreamTag from the indexer for a given namespace and name. +func (s imageStreamTagNamespaceLister) Get(name string) (*v1.ImageStreamTag, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("imagestreamtag"), name) + } + return obj.(*v1.ImageStreamTag), nil +} diff --git a/vendor/github.com/openshift/client-go/image/listers/image/v1/imagetag.go b/vendor/github.com/openshift/client-go/image/listers/image/v1/imagetag.go new file mode 100644 index 0000000000..bbc4518c23 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/listers/image/v1/imagetag.go @@ -0,0 +1,83 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/api/image/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ImageTagLister helps list ImageTags. +// All objects returned here must be treated as read-only. +type ImageTagLister interface { + // List lists all ImageTags in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.ImageTag, err error) + // ImageTags returns an object that can list and get ImageTags. + ImageTags(namespace string) ImageTagNamespaceLister + ImageTagListerExpansion +} + +// imageTagLister implements the ImageTagLister interface. +type imageTagLister struct { + indexer cache.Indexer +} + +// NewImageTagLister returns a new ImageTagLister. +func NewImageTagLister(indexer cache.Indexer) ImageTagLister { + return &imageTagLister{indexer: indexer} +} + +// List lists all ImageTags in the indexer. +func (s *imageTagLister) List(selector labels.Selector) (ret []*v1.ImageTag, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ImageTag)) + }) + return ret, err +} + +// ImageTags returns an object that can list and get ImageTags. +func (s *imageTagLister) ImageTags(namespace string) ImageTagNamespaceLister { + return imageTagNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ImageTagNamespaceLister helps list and get ImageTags. +// All objects returned here must be treated as read-only. +type ImageTagNamespaceLister interface { + // List lists all ImageTags in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.ImageTag, err error) + // Get retrieves the ImageTag from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.ImageTag, error) + ImageTagNamespaceListerExpansion +} + +// imageTagNamespaceLister implements the ImageTagNamespaceLister +// interface. +type imageTagNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all ImageTags in the indexer for a given namespace. 
+func (s imageTagNamespaceLister) List(selector labels.Selector) (ret []*v1.ImageTag, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ImageTag)) + }) + return ret, err +} + +// Get retrieves the ImageTag from the indexer for a given namespace and name. +func (s imageTagNamespaceLister) Get(name string) (*v1.ImageTag, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("imagetag"), name) + } + return obj.(*v1.ImageTag), nil +} diff --git a/vendor/modules.txt b/vendor/modules.txt index fc98ba0579..7874b89f56 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -878,8 +878,16 @@ github.com/openshift/api/user/v1 ## explicit; go 1.20 github.com/openshift/client-go/build/applyconfigurations/build/v1 github.com/openshift/client-go/build/applyconfigurations/internal +github.com/openshift/client-go/build/clientset/versioned +github.com/openshift/client-go/build/clientset/versioned/fake github.com/openshift/client-go/build/clientset/versioned/scheme github.com/openshift/client-go/build/clientset/versioned/typed/build/v1 +github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/fake +github.com/openshift/client-go/build/informers/externalversions +github.com/openshift/client-go/build/informers/externalversions/build +github.com/openshift/client-go/build/informers/externalversions/build/v1 +github.com/openshift/client-go/build/informers/externalversions/internalinterfaces +github.com/openshift/client-go/build/listers/build/v1 github.com/openshift/client-go/config/applyconfigurations/config/v1 github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1 github.com/openshift/client-go/config/applyconfigurations/internal @@ -899,8 +907,16 @@ github.com/openshift/client-go/config/listers/config/v1 github.com/openshift/client-go/config/listers/config/v1alpha1 github.com/openshift/client-go/image/applyconfigurations/image/v1 github.com/openshift/client-go/image/applyconfigurations/internal +github.com/openshift/client-go/image/clientset/versioned +github.com/openshift/client-go/image/clientset/versioned/fake github.com/openshift/client-go/image/clientset/versioned/scheme github.com/openshift/client-go/image/clientset/versioned/typed/image/v1 +github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake +github.com/openshift/client-go/image/informers/externalversions +github.com/openshift/client-go/image/informers/externalversions/image +github.com/openshift/client-go/image/informers/externalversions/image/v1 +github.com/openshift/client-go/image/informers/externalversions/internalinterfaces +github.com/openshift/client-go/image/listers/image/v1 github.com/openshift/client-go/operator/applyconfigurations/internal github.com/openshift/client-go/operator/applyconfigurations/operator/v1 github.com/openshift/client-go/operator/applyconfigurations/operator/v1alpha1
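The vendored packages above are generated client-gen/informer-gen/lister-gen output and are not meant to be edited directly; what matters is how a consumer wires them together. The sketch below is only an illustration of that wiring and is not code from this change: the namespace ("openshift-machine-config-operator"), the imagestream name ("os-image"), and the resync period are placeholder values chosen for the example. It uses only APIs that appear in the vendored files: the shared informer factory with its namespace option, the Image().V1() group/version accessors, Start/WaitForCacheSync, and the namespaced ImageStream lister.

package main

import (
	"fmt"
	"time"

	imageclientset "github.com/openshift/client-go/image/clientset/versioned"
	imageinformers "github.com/openshift/client-go/image/informers/externalversions"
	"k8s.io/client-go/rest"
)

func main() {
	// Build an image.openshift.io/v1 clientset from the in-cluster config.
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	client := imageclientset.NewForConfigOrDie(cfg)

	// Construct a shared informer factory limited to a single (placeholder)
	// namespace, and request the ImageStream lister before starting so the
	// factory knows which informers to run.
	factory := imageinformers.NewSharedInformerFactoryWithOptions(
		client,
		15*time.Minute, // arbitrary resync period for the example
		imageinformers.WithNamespace("openshift-machine-config-operator"),
	)
	imageStreamLister := factory.Image().V1().ImageStreams().Lister()

	// Start all requested informers and wait for their caches to sync
	// before reading from the lister.
	stopCh := make(chan struct{})
	defer close(stopCh)
	factory.Start(stopCh)
	for typ, ok := range factory.WaitForCacheSync(stopCh) {
		if !ok {
			panic(fmt.Sprintf("cache for %v failed to sync", typ))
		}
	}

	// Objects returned by the lister must be treated as read-only.
	is, err := imageStreamLister.ImageStreams("openshift-machine-config-operator").Get("os-image")
	if err != nil {
		fmt.Println("imagestream lookup failed:", err)
		return
	}
	fmt.Println("resolved imagestream:", is.Name)
}

The fake clientset vendored under github.com/openshift/client-go/image/clientset/versioned/fake implements the same versioned.Interface, so the identical wiring can be exercised in unit tests without a live API server.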