diff --git a/pkg/reconciler/revision/cruds.go b/pkg/reconciler/revision/cruds.go index d11837b8124d..d3dc4ed120d7 100644 --- a/pkg/reconciler/revision/cruds.go +++ b/pkg/reconciler/revision/cruds.go @@ -109,8 +109,8 @@ func (c *Reconciler) checkAndUpdateDeployment(ctx context.Context, rev *v1.Revis return d, nil } -func (c *Reconciler) createImageCache(ctx context.Context, rev *v1.Revision) (*caching.Image, error) { - image := resources.MakeImageCache(rev) +func (c *Reconciler) createImageCache(rev *v1.Revision, containerName, imageDigest string) (*caching.Image, error) { + image := resources.MakeImageCache(rev, containerName, imageDigest) return c.cachingclient.CachingV1alpha1().Images(image.Namespace).Create(image) } diff --git a/pkg/reconciler/revision/reconcile_resources.go b/pkg/reconciler/revision/reconcile_resources.go index 13360eae0cd6..48065c5b1c42 100644 --- a/pkg/reconciler/revision/reconcile_resources.go +++ b/pkg/reconciler/revision/reconcile_resources.go @@ -26,6 +26,7 @@ import ( "k8s.io/apimachinery/pkg/api/equality" apierrs "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/kmeta" "knative.dev/pkg/logging" "knative.dev/pkg/logging/logkey" v1 "knative.dev/serving/pkg/apis/serving/v1" @@ -113,18 +114,20 @@ func (c *Reconciler) reconcileImageCache(ctx context.Context, rev *v1.Revision) logger := logging.FromContext(ctx) ns := rev.Namespace - imageName := resourcenames.ImageCache(rev) - _, err := c.imageLister.Images(ns).Get(imageName) - if apierrs.IsNotFound(err) { - _, err := c.createImageCache(ctx, rev) - if err != nil { - return fmt.Errorf("failed to create image cache %q: %w", imageName, err) + // Revisions are immutable. + // Updating the image results in a new revision, so there is no chance of a resource leak.
+ for _, container := range rev.Status.ContainerStatuses { + imageName := kmeta.ChildName(resourcenames.ImageCache(rev), "-"+container.Name) + if _, err := c.imageLister.Images(ns).Get(imageName); apierrs.IsNotFound(err) { + if _, err := c.createImageCache(rev, container.Name, container.ImageDigest); err != nil { + return fmt.Errorf("failed to create image cache %q: %w", imageName, err) + } + logger.Infof("Created image cache %q", imageName) + } else if err != nil { + return fmt.Errorf("failed to get image cache %q: %w", imageName, err) } - logger.Infof("Created image cache %q", imageName) - } else if err != nil { - return fmt.Errorf("failed to get image cache %q: %w", imageName, err) - } + } return nil } diff --git a/pkg/reconciler/revision/resources/imagecache.go b/pkg/reconciler/revision/resources/imagecache.go index d2816b89fb73..1355705f304a 100644 --- a/pkg/reconciler/revision/resources/imagecache.go +++ b/pkg/reconciler/revision/resources/imagecache.go @@ -27,15 +27,10 @@ import ( ) // MakeImageCache makes an caching.Image resources from a revision. 
-func MakeImageCache(rev *v1.Revision) *caching.Image { - image := rev.Status.DeprecatedImageDigest - if image == "" { - image = rev.Spec.GetContainer().Image - } - +func MakeImageCache(rev *v1.Revision, containerName, image string) *caching.Image { img := &caching.Image{ ObjectMeta: metav1.ObjectMeta{ - Name: names.ImageCache(rev), + Name: kmeta.ChildName(names.ImageCache(rev), "-"+containerName), Namespace: rev.Namespace, Labels: makeLabels(rev), Annotations: kmeta.FilterMap(rev.GetAnnotations(), func(k string) bool { diff --git a/pkg/reconciler/revision/resources/imagecache_test.go b/pkg/reconciler/revision/resources/imagecache_test.go index 7015476718cf..4b1ce2265062 100644 --- a/pkg/reconciler/revision/resources/imagecache_test.go +++ b/pkg/reconciler/revision/resources/imagecache_test.go @@ -31,9 +31,11 @@ import ( func TestMakeImageCache(t *testing.T) { tests := []struct { - name string - rev *v1.Revision - want *caching.Image + name string + rev *v1.Revision + containerName string + image string + want *caching.Image }{{ name: "simple container", rev: &v1.Revision{ @@ -55,13 +57,19 @@ func TestMakeImageCache(t *testing.T) { }, }, Status: v1.RevisionStatus{ + ContainerStatuses: []v1.ContainerStatuses{{ + Name: "user-container", + ImageDigest: "busybox@sha256:deadbeef", + }}, DeprecatedImageDigest: "busybox@sha256:deadbeef", }, }, + containerName: "user-container", + image: "busybox@sha256:deadbeef", want: &caching.Image{ ObjectMeta: metav1.ObjectMeta{ Namespace: "foo", - Name: "bar-cache", + Name: "bar-cache-user-container", Labels: map[string]string{ serving.RevisionLabelKey: "bar", serving.RevisionUID: "1234", @@ -83,6 +91,127 @@ func TestMakeImageCache(t *testing.T) { Image: "busybox@sha256:deadbeef", }, }, + }, { + name: "multiple containers", + rev: &v1.Revision{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "bar", + Annotations: map[string]string{ + "a": "b", + serving.RevisionLastPinnedAnnotationKey: "c", + }, + UID: "1234", + }, + 
Spec: v1.RevisionSpec{ + ContainerConcurrency: ptr.Int64(1), + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "ubuntu", + Ports: []corev1.ContainerPort{{ + ContainerPort: 80, + }}, + }, { + Image: "busybox", + }}, + }, + }, + Status: v1.RevisionStatus{ + ContainerStatuses: []v1.ContainerStatuses{{ + Name: "user-container1", + ImageDigest: "ubuntu@sha256:deadbeef1", + }, { + Name: "user-container", + ImageDigest: "busybox@sha256:deadbeef", + }}, + DeprecatedImageDigest: "busybox@sha256:deadbeef", + }, + }, + containerName: "user-container1", + image: "ubuntu@sha256:deadbeef1", + want: &caching.Image{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "bar-cache-user-container1", + Labels: map[string]string{ + serving.RevisionLabelKey: "bar", + serving.RevisionUID: "1234", + AppLabelKey: "bar", + }, + Annotations: map[string]string{ + "a": "b", + }, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: v1.SchemeGroupVersion.String(), + Kind: "Revision", + Name: "bar", + UID: "1234", + Controller: ptr.Bool(true), + BlockOwnerDeletion: ptr.Bool(true), + }}, + }, + Spec: caching.ImageSpec{ + Image: "ubuntu@sha256:deadbeef1", + }, + }, + }, { + name: "Image cache for multiple containers when image digests are empty", + rev: &v1.Revision{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "bar", + Annotations: map[string]string{ + "a": "b", + serving.RevisionLastPinnedAnnotationKey: "c", + }, + UID: "1234", + }, + Spec: v1.RevisionSpec{ + ContainerConcurrency: ptr.Int64(1), + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "user-container", + Image: "ubuntu", + Ports: []corev1.ContainerPort{{ + ContainerPort: 80, + }}, + }, { + Name: "user-container1", + Image: "busybox", + }}, + }, + }, + Status: v1.RevisionStatus{ + ContainerStatuses: []v1.ContainerStatuses{{}}, + }, + }, + containerName: "user-container", + image: "", + want: &caching.Image{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: 
"bar-cache-user-container", + Labels: map[string]string{ + serving.RevisionLabelKey: "bar", + serving.RevisionUID: "1234", + AppLabelKey: "bar", + }, + Annotations: map[string]string{ + "a": "b", + }, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: v1.SchemeGroupVersion.String(), + Kind: "Revision", + Name: "bar", + UID: "1234", + Controller: ptr.Bool(true), + BlockOwnerDeletion: ptr.Bool(true), + }}, + }, + Spec: caching.ImageSpec{ + Image: "", + }, + }, }, { name: "with service account", rev: &v1.Revision{ @@ -96,15 +225,24 @@ func TestMakeImageCache(t *testing.T) { PodSpec: corev1.PodSpec{ ServiceAccountName: "privilegeless", Containers: []corev1.Container{{ + Name: "user-container", Image: "busybox", }}, }, }, + Status: v1.RevisionStatus{ + ContainerStatuses: []v1.ContainerStatuses{{ + Name: "user-container", + ImageDigest: "busybox@sha256:deadbeef", + }}, + }, }, + containerName: "user-container", + image: "busybox@sha256:deadbeef", want: &caching.Image{ ObjectMeta: metav1.ObjectMeta{ Namespace: "foo", - Name: "bar-cache", + Name: "bar-cache-user-container", Labels: map[string]string{ serving.RevisionLabelKey: "bar", serving.RevisionUID: "1234", @@ -121,7 +259,7 @@ func TestMakeImageCache(t *testing.T) { }}, }, Spec: caching.ImageSpec{ - Image: "busybox", + Image: "busybox@sha256:deadbeef", ServiceAccountName: "privilegeless", }, }, @@ -129,7 +267,7 @@ func TestMakeImageCache(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - got := MakeImageCache(test.rev) + got := MakeImageCache(test.rev, test.containerName, test.image) if diff := cmp.Diff(test.want, got); diff != "" { t.Errorf("MakeImageCache (-want, +got) = %v", diff) } diff --git a/pkg/reconciler/revision/revision_test.go b/pkg/reconciler/revision/revision_test.go index e385a9dab752..c4fe468b4740 100644 --- a/pkg/reconciler/revision/revision_test.go +++ b/pkg/reconciler/revision/revision_test.go @@ -111,7 +111,7 @@ func testReadyPA(rev *v1.Revision) 
*av1alpha1.PodAutoscaler { return pa } -func newTestControllerWithConfig(t *testing.T, deploymentConfig *deployment.Config, configs []*corev1.ConfigMap, opts ...reconcilerOption) ( +func newTestControllerWithConfig(t *testing.T, configs []*corev1.ConfigMap, opts ...reconcilerOption) ( context.Context, []controller.Informer, *controller.Impl, @@ -233,12 +233,14 @@ func addResourcesToInformers(t *testing.T, ctx context.Context, rev *v1.Revision fakepainformer.Get(ctx).Informer().GetIndexer().Add(pa) } - imageName := resourcenames.ImageCache(rev) - image, err := fakecachingclient.Get(ctx).CachingV1alpha1().Images(rev.Namespace).Get(imageName, metav1.GetOptions{}) - if err != nil { - t.Errorf("Caching.Images.Get(%v) = %v", imageName, err) - } else { - fakeimageinformer.Get(ctx).Informer().GetIndexer().Add(image) + for _, v := range rev.Spec.Containers { + imageName := kmeta.ChildName(resourcenames.ImageCache(rev), "-"+v.Name) + image, err := fakecachingclient.Get(ctx).CachingV1alpha1().Images(rev.Namespace).Get(imageName, metav1.GetOptions{}) + if err != nil { + t.Errorf("Caching.Images.Get(%v) = %v", imageName, err) + } else { + fakeimageinformer.Get(ctx).Informer().GetIndexer().Add(image) + } } deploymentName := resourcenames.Deployment(rev) @@ -299,8 +301,7 @@ func TestResolutionFailed(t *testing.T) { // TODO(mattmoor): add coverage of a Reconcile fixing a stale logging URL func TestUpdateRevWithWithUpdatedLoggingURL(t *testing.T) { - deploymentConfig := getTestDeploymentConfig() - ctx, _, controller, watcher := newTestControllerWithConfig(t, deploymentConfig, []*corev1.ConfigMap{{ + ctx, _, controller, watcher := newTestControllerWithConfig(t, []*corev1.ConfigMap{{ ObjectMeta: metav1.ObjectMeta{ Namespace: system.Namespace(), Name: metrics.ConfigMapName(), @@ -341,16 +342,7 @@ func TestUpdateRevWithWithUpdatedLoggingURL(t *testing.T) { } func TestRevWithImageDigests(t *testing.T) { - deploymentConfig := getTestDeploymentConfig() - ctx, _, controller, _ := 
newTestControllerWithConfig(t, deploymentConfig, []*corev1.ConfigMap{{ - ObjectMeta: metav1.ObjectMeta{ - Name: config.DefaultsConfigName, - Namespace: system.Namespace(), - }, - Data: map[string]string{ - "container-name-template": "user-container", - }, - }}) + ctx, _, controller, _ := newTestControllerWithConfig(t, nil) rev := testRevision(corev1.PodSpec{ Containers: []corev1.Container{{ @@ -541,8 +533,7 @@ func TestGlobalResyncOnConfigMapUpdateRevision(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - controllerConfig := getTestDeploymentConfig() - ctx, informers, ctrl, watcher := newTestControllerWithConfig(t, controllerConfig, nil) + ctx, informers, ctrl, watcher := newTestControllerWithConfig(t, nil) ctx, cancel := context.WithCancel(ctx) grp := errgroup.Group{} @@ -678,8 +669,7 @@ func TestGlobalResyncOnConfigMapUpdateDeployment(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - controllerConfig := getTestDeploymentConfig() - ctx, informers, ctrl, watcher := newTestControllerWithConfig(t, controllerConfig, nil) + ctx, informers, ctrl, watcher := newTestControllerWithConfig(t, nil) ctx, cancel := context.WithCancel(ctx) grp := errgroup.Group{} diff --git a/pkg/reconciler/revision/table_test.go b/pkg/reconciler/revision/table_test.go index 077f60572198..4a12c0599a0e 100644 --- a/pkg/reconciler/revision/table_test.go +++ b/pkg/reconciler/revision/table_test.go @@ -85,7 +85,9 @@ func TestReconcile(t *testing.T) { WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ Object: rev("foo", "first-reconcile", // The first reconciliation Populates the following status properties. 
- WithLogURL, AllUnknownConditions, MarkDeploying("Deploying"), WithImageDigests), + WithLogURL, AllUnknownConditions, MarkDeploying("Deploying"), WithContainerStatuses([]v1.ContainerStatuses{ + {Name: "first-reconcile"}, + })), }}, Key: "foo/first-reconcile", }, { @@ -108,7 +110,9 @@ func TestReconcile(t *testing.T) { WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ Object: rev("foo", "update-status-failure", // Despite failure, the following status properties are set. - WithLogURL, AllUnknownConditions, MarkDeploying("Deploying"), WithImageDigests), + WithLogURL, AllUnknownConditions, MarkDeploying("Deploying"), WithContainerStatuses([]v1.ContainerStatuses{ + {Name: "update-status-failure"}, + })), }}, WantEvents: []string{ Eventf(corev1.EventTypeWarning, "UpdateFailed", "Failed to update status for %q: %v", @@ -136,7 +140,7 @@ func TestReconcile(t *testing.T) { Object: rev("foo", "create-pa-failure", // Despite failure, the following status properties are set. WithLogURL, WithInitRevConditions, - MarkDeploying("Deploying"), WithImageDigests), + MarkDeploying("Deploying"), WithContainerStatuses([]v1.ContainerStatuses{{Name: "create-pa-failure"}})), }}, WantEvents: []string{ Eventf(corev1.EventTypeWarning, "InternalError", `failed to create PA "create-pa-failure": inducing failure for create podautoscalers`), @@ -162,7 +166,7 @@ func TestReconcile(t *testing.T) { Object: rev("foo", "create-user-deploy-failure", // Despite failure, the following status properties are set. WithLogURL, WithInitRevConditions, - MarkDeploying("Deploying"), WithImageDigests), + MarkDeploying("Deploying"), WithContainerStatuses([]v1.ContainerStatuses{{Name: "create-user-deploy-failure"}})), }}, WantEvents: []string{ Eventf(corev1.EventTypeWarning, "InternalError", @@ -176,7 +180,8 @@ func TestReconcile(t *testing.T) { // state (immediately post-creation), and verify that no changes // are necessary. 
Objects: []runtime.Object{ - rev("foo", "stable-reconcile", WithLogURL, AllUnknownConditions, WithImageDigests), + rev("foo", "stable-reconcile", WithLogURL, AllUnknownConditions, + WithContainerStatuses([]v1.ContainerStatuses{{Name: "stable-reconcile"}})), pa("foo", "stable-reconcile", WithReachability(asv1a1.ReachabilityUnknown)), deploy(t, "foo", "stable-reconcile"), @@ -190,7 +195,7 @@ func TestReconcile(t *testing.T) { // with our desired spec. Objects: []runtime.Object{ rev("foo", "fix-containers", - WithLogURL, AllUnknownConditions, WithImageDigests), + WithLogURL, AllUnknownConditions, WithContainerStatuses([]v1.ContainerStatuses{{Name: "fix-containers"}})), pa("foo", "fix-containers", WithReachability(asv1a1.ReachabilityUnknown)), changeContainers(deploy(t, "foo", "fix-containers")), image("foo", "fix-containers"), @@ -208,7 +213,7 @@ func TestReconcile(t *testing.T) { }, Objects: []runtime.Object{ rev("foo", "failure-update-deploy", - withK8sServiceName("whateves"), WithLogURL, AllUnknownConditions, WithImageDigests), + withK8sServiceName("whateves"), WithLogURL, AllUnknownConditions, WithContainerStatuses([]v1.ContainerStatuses{{Name: "failure-update-deploy"}})), pa("foo", "failure-update-deploy"), changeContainers(deploy(t, "foo", "failure-update-deploy")), image("foo", "failure-update-deploy"), @@ -229,7 +234,7 @@ func TestReconcile(t *testing.T) { Objects: []runtime.Object{ rev("foo", "stable-deactivation", WithLogURL, MarkRevisionReady, - MarkInactive("NoTraffic", "This thing is inactive."), WithImageDigests), + MarkInactive("NoTraffic", "This thing is inactive."), WithContainerStatuses([]v1.ContainerStatuses{{Name: "stable-deactivation"}})), pa("foo", "stable-deactivation", WithNoTraffic("NoTraffic", "This thing is inactive.")), deploy(t, "foo", "stable-deactivation"), @@ -250,7 +255,7 @@ func TestReconcile(t *testing.T) { WithLogURL, // When the endpoint and pa are ready, then we will see the // Revision become ready. 
- MarkRevisionReady, WithImageDigests), + MarkRevisionReady, WithContainerStatuses([]v1.ContainerStatuses{{Name: "pa-ready"}})), }}, WantEvents: []string{ Eventf(corev1.EventTypeNormal, "RevisionReady", "Revision becomes ready upon all resources being ready"), @@ -271,7 +276,7 @@ func TestReconcile(t *testing.T) { }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ Object: rev("foo", "pa-not-ready", - WithLogURL, MarkRevisionReady, WithImageDigests, + WithLogURL, MarkRevisionReady, WithContainerStatuses([]v1.ContainerStatuses{{Name: "pa-not-ready"}}), withK8sServiceName("its-not-confidential"), // When we reconcile a ready state and our pa is in an activating // state, we should see the following mutation. @@ -292,7 +297,7 @@ func TestReconcile(t *testing.T) { }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ Object: rev("foo", "pa-inactive", - WithLogURL, MarkRevisionReady, WithImageDigests, + WithLogURL, MarkRevisionReady, WithContainerStatuses([]v1.ContainerStatuses{{Name: "pa-inactive"}}), // When we reconcile an "all ready" revision when the PA // is inactive, we should see the following change. MarkInactive("NoTraffic", "This thing is inactive.")), @@ -317,7 +322,7 @@ func TestReconcile(t *testing.T) { withK8sServiceName("pa-inactive-svc"), // When we reconcile an "all ready" revision when the PA // is inactive, we should see the following change. - MarkInactive("NoTraffic", "This thing is inactive."), WithImageDigests), + MarkInactive("NoTraffic", "This thing is inactive."), WithContainerStatuses([]v1.ContainerStatuses{{Name: "pa-inactive"}})), }}, Key: "foo/pa-inactive", }, { @@ -338,7 +343,7 @@ func TestReconcile(t *testing.T) { WithLogURL, AllUnknownConditions, // When our reconciliation has to change the service // we should see the following mutations to status. 
- withK8sServiceName("fix-mutated-pa"), WithLogURL, MarkRevisionReady, WithImageDigests), + withK8sServiceName("fix-mutated-pa"), WithLogURL, MarkRevisionReady, WithContainerStatuses([]v1.ContainerStatuses{{Name: "fix-mutated-pa"}})), }}, WantUpdates: []clientgotesting.UpdateActionImpl{{ Object: pa("foo", "fix-mutated-pa", WithTraffic, @@ -351,7 +356,7 @@ func TestReconcile(t *testing.T) { Objects: []runtime.Object{ rev("foo", "fix-mutated-pa-fail", withK8sServiceName("some-old-stuff"), - WithLogURL, AllUnknownConditions, WithImageDigests), + WithLogURL, AllUnknownConditions, WithContainerStatuses([]v1.ContainerStatuses{{Name: "fix-mutated-pa-fail"}})), pa("foo", "fix-mutated-pa-fail", WithProtocolType(networking.ProtocolH2C), WithReachability(asv1a1.ReachabilityUnknown)), deploy(t, "foo", "fix-mutated-pa-fail"), image("foo", "fix-mutated-pa-fail"), @@ -386,7 +391,7 @@ func TestReconcile(t *testing.T) { WithLogURL, AllUnknownConditions, // When the revision is reconciled after a Deployment has // timed out, we should see it marked with the PDE state. - MarkProgressDeadlineExceeded("I timed out!"), WithImageDigests), + MarkProgressDeadlineExceeded("I timed out!"), WithContainerStatuses([]v1.ContainerStatuses{{Name: "deploy-timeout"}})), }}, Key: "foo/deploy-timeout", }, { @@ -407,147 +412,150 @@ func TestReconcile(t *testing.T) { WithLogURL, AllUnknownConditions, // When the revision is reconciled after a Deployment has // timed out, we should see it marked with the FailedCreate state. - MarkResourcesUnavailable("FailedCreate", "I replica failed!"), WithImageDigests), + MarkResourcesUnavailable("FailedCreate", "I replica failed!"), WithContainerStatuses([]v1.ContainerStatuses{{Name: "deploy-replica-failure"}})), }}, Key: "foo/deploy-replica-failure", - }, { - Name: "surface ImagePullBackoff", - // Test the propagation of ImagePullBackoff from user container. 
- Objects: []runtime.Object{ - rev("foo", "pull-backoff", - withK8sServiceName("the-taxman"), WithLogURL, MarkActivating("Deploying", "")), - pa("foo", "pull-backoff", WithReachability(asv1a1.ReachabilityUnknown)), // pa can't be ready since deployment times out. - pod(t, "foo", "pull-backoff", WithWaitingContainer("user-container", "ImagePullBackoff", "can't pull it")), - timeoutDeploy(deploy(t, "foo", "pull-backoff"), "Timed out!"), - image("foo", "pull-backoff"), - }, - WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - Object: rev("foo", "pull-backoff", - WithLogURL, AllUnknownConditions, - MarkResourcesUnavailable("ImagePullBackoff", "can't pull it"), WithImageDigests), - }}, - WantUpdates: []clientgotesting.UpdateActionImpl{{ - Object: pa("foo", "pull-backoff", WithReachability(asv1a1.ReachabilityUnreachable)), - }}, - Key: "foo/pull-backoff", - }, { - Name: "surface pod errors", - // Test the propagation of the termination state of a Pod into the revision. - // This initializes the world to the stable state after its first reconcile, - // but changes the user deployment to have a failing pod. It then verifies - // that Reconcile propagates this into the status of the Revision. - Objects: []runtime.Object{ - rev("foo", "pod-error", - withK8sServiceName("a-pod-error"), WithLogURL, AllUnknownConditions, MarkActive), - pa("foo", "pod-error"), // PA can't be ready, since no traffic. - pod(t, "foo", "pod-error", WithFailingContainer("user-container", 5, "I failed man!")), - deploy(t, "foo", "pod-error"), - image("foo", "pod-error"), - }, - WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - Object: rev("foo", "pod-error", - WithLogURL, AllUnknownConditions, MarkContainerExiting(5, v1.RevisionContainerExitingMessage("I failed man!")), WithImageDigests), - }}, - Key: "foo/pod-error", - }, { - Name: "surface pod schedule errors", - // Test the propagation of the scheduling errors of Pod into the revision. 
- // This initializes the world to unschedule pod. It then verifies - // that Reconcile propagates this into the status of the Revision. - Objects: []runtime.Object{ - rev("foo", "pod-schedule-error", - withK8sServiceName("a-pod-schedule-error"), WithLogURL, AllUnknownConditions, MarkActive), - pa("foo", "pod-schedule-error"), // PA can't be ready, since no traffic. - pod(t, "foo", "pod-schedule-error", WithUnschedulableContainer("Insufficient energy", "Unschedulable")), - deploy(t, "foo", "pod-schedule-error"), - image("foo", "pod-schedule-error"), - }, - WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - Object: rev("foo", "pod-schedule-error", - WithLogURL, AllUnknownConditions, MarkResourcesUnavailable("Insufficient energy", "Unschedulable"), WithImageDigests), - }}, - Key: "foo/pod-schedule-error", - }, { - Name: "ready steady state", - // Test the transition that Reconcile makes when Endpoints become ready on the - // SKS owned services, which is signalled by pa having service name. - // This puts the world into the stable post-reconcile state for an Active - // Revision. It then creates an Endpoints resource with active subsets. - // This signal should make our Reconcile mark the Revision as Ready. 
- Objects: []runtime.Object{ - rev("foo", "steady-ready", withK8sServiceName("very-steady"), WithLogURL), - pa("foo", "steady-ready", WithTraffic, WithPAStatusService("steadier-even")), - deploy(t, "foo", "steady-ready"), - image("foo", "steady-ready"), - }, - WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - Object: rev("foo", "steady-ready", withK8sServiceName("steadier-even"), WithLogURL, - // All resources are ready to go, we should see the revision being - // marked ready - MarkRevisionReady, WithImageDigests), - }}, - WantEvents: []string{ - Eventf(corev1.EventTypeNormal, "RevisionReady", "Revision becomes ready upon all resources being ready"), - }, - Key: "foo/steady-ready", - }, { - Name: "lost pa owner ref", - WantErr: true, - Objects: []runtime.Object{ - rev("foo", "missing-owners", withK8sServiceName("lesser-revision"), WithLogURL, - MarkRevisionReady), - pa("foo", "missing-owners", WithTraffic, WithPodAutoscalerOwnersRemoved), - deploy(t, "foo", "missing-owners"), - image("foo", "missing-owners"), - }, - WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - Object: rev("foo", "missing-owners", withK8sServiceName("lesser-revision"), WithLogURL, - MarkRevisionReady, - // When we're missing the OwnerRef for PodAutoscaler we see this update. 
- MarkResourceNotOwned("PodAutoscaler", "missing-owners"), WithImageDigests), - }}, - WantEvents: []string{ - Eventf(corev1.EventTypeWarning, "InternalError", `revision: "missing-owners" does not own PodAutoscaler: "missing-owners"`), - }, - Key: "foo/missing-owners", - }, { - Name: "lost deployment owner ref", - WantErr: true, - Objects: []runtime.Object{ - rev("foo", "missing-owners", withK8sServiceName("youre-gonna-lose"), WithLogURL, - MarkRevisionReady, WithImageDigests), - pa("foo", "missing-owners", WithTraffic), - noOwner(deploy(t, "foo", "missing-owners")), - image("foo", "missing-owners"), - }, - WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - Object: rev("foo", "missing-owners", withK8sServiceName("youre-gonna-lose"), WithLogURL, - MarkRevisionReady, - // When we're missing the OwnerRef for Deployment we see this update. - MarkResourceNotOwned("Deployment", "missing-owners-deployment"), WithImageDigests), - }}, - WantEvents: []string{ - Eventf(corev1.EventTypeWarning, "InternalError", `revision: "missing-owners" does not own Deployment: "missing-owners-deployment"`), - }, - Key: "foo/missing-owners", - }, { - Name: "image pull secrets", - // This test case tests that the image pull secrets from revision propagate to deployment and image - Objects: []runtime.Object{ - rev("foo", "image-pull-secrets", WithImagePullSecrets("foo-secret")), - }, - WantCreates: []runtime.Object{ - pa("foo", "image-pull-secrets"), - deployImagePullSecrets(deploy(t, "foo", "image-pull-secrets"), "foo-secret"), - imagePullSecrets(image("foo", "image-pull-secrets"), "foo-secret"), - }, - WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - Object: rev("foo", "image-pull-secrets", - WithImagePullSecrets("foo-secret"), - WithLogURL, AllUnknownConditions, MarkDeploying("Deploying"), WithImageDigests), - }}, - Key: "foo/image-pull-secrets", - }} + }, + { + Name: "surface ImagePullBackoff", + // Test the propagation of ImagePullBackoff from user container. 
+ Objects: []runtime.Object{ + rev("foo", "pull-backoff", + withK8sServiceName("the-taxman"), WithLogURL, MarkActivating("Deploying", ""), WithContainerStatuses([]v1.ContainerStatuses{{Name: "pull-backoff"}})), + pa("foo", "pull-backoff", WithReachability(asv1a1.ReachabilityUnknown)), // pa can't be ready since deployment times out. + pod(t, "foo", "pull-backoff", WithWaitingContainer("pull-backoff", "ImagePullBackoff", "can't pull it")), + timeoutDeploy(deploy(t, "foo", "pull-backoff"), "Timed out!"), + image("foo", "pull-backoff"), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: rev("foo", "pull-backoff", + WithLogURL, AllUnknownConditions, + MarkResourcesUnavailable("ImagePullBackoff", "can't pull it"), WithContainerStatuses([]v1.ContainerStatuses{{Name: "pull-backoff"}})), + }}, + WantUpdates: []clientgotesting.UpdateActionImpl{{ + Object: pa("foo", "pull-backoff", WithReachability(asv1a1.ReachabilityUnreachable)), + }}, + Key: "foo/pull-backoff", + }, { + Name: "surface pod errors", + // Test the propagation of the termination state of a Pod into the revision. + // This initializes the world to the stable state after its first reconcile, + // but changes the user deployment to have a failing pod. It then verifies + // that Reconcile propagates this into the status of the Revision. + Objects: []runtime.Object{ + rev("foo", "pod-error", + withK8sServiceName("a-pod-error"), WithLogURL, AllUnknownConditions, MarkActive), + pa("foo", "pod-error"), // PA can't be ready, since no traffic. 
+ pod(t, "foo", "pod-error", WithFailingContainer("pod-error", 5, "I failed man!")), + deploy(t, "foo", "pod-error"), + image("foo", "pod-error"), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: rev("foo", "pod-error", + WithLogURL, AllUnknownConditions, MarkContainerExiting(5, + v1.RevisionContainerExitingMessage("I failed man!")), WithContainerStatuses([]v1.ContainerStatuses{{Name: "pod-error"}})), + }}, + Key: "foo/pod-error", + }, { + Name: "surface pod schedule errors", + // Test the propagation of the scheduling errors of Pod into the revision. + // This initializes the world to unschedule pod. It then verifies + // that Reconcile propagates this into the status of the Revision. + Objects: []runtime.Object{ + rev("foo", "pod-schedule-error", + withK8sServiceName("a-pod-schedule-error"), WithLogURL, AllUnknownConditions, MarkActive), + pa("foo", "pod-schedule-error"), // PA can't be ready, since no traffic. + pod(t, "foo", "pod-schedule-error", WithUnschedulableContainer("Insufficient energy", "Unschedulable")), + deploy(t, "foo", "pod-schedule-error"), + image("foo", "pod-schedule-error"), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: rev("foo", "pod-schedule-error", + WithLogURL, AllUnknownConditions, MarkResourcesUnavailable("Insufficient energy", + "Unschedulable"), WithContainerStatuses([]v1.ContainerStatuses{{Name: "pod-schedule-error"}})), + }}, + Key: "foo/pod-schedule-error", + }, { + Name: "ready steady state", + // Test the transition that Reconcile makes when Endpoints become ready on the + // SKS owned services, which is signalled by pa having service name. + // This puts the world into the stable post-reconcile state for an Active + // Revision. It then creates an Endpoints resource with active subsets. + // This signal should make our Reconcile mark the Revision as Ready. 
+ Objects: []runtime.Object{ + rev("foo", "steady-ready", withK8sServiceName("very-steady"), WithLogURL), + pa("foo", "steady-ready", WithTraffic, WithPAStatusService("steadier-even")), + deploy(t, "foo", "steady-ready"), + image("foo", "steady-ready"), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: rev("foo", "steady-ready", withK8sServiceName("steadier-even"), WithLogURL, + // All resources are ready to go, we should see the revision being + // marked ready + MarkRevisionReady, WithContainerStatuses([]v1.ContainerStatuses{{Name: "steady-ready"}})), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "RevisionReady", "Revision becomes ready upon all resources being ready"), + }, + Key: "foo/steady-ready", + }, { + Name: "lost pa owner ref", + WantErr: true, + Objects: []runtime.Object{ + rev("foo", "missing-owners", withK8sServiceName("lesser-revision"), WithLogURL, + MarkRevisionReady), + pa("foo", "missing-owners", WithTraffic, WithPodAutoscalerOwnersRemoved), + deploy(t, "foo", "missing-owners"), + image("foo", "missing-owners"), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: rev("foo", "missing-owners", withK8sServiceName("lesser-revision"), WithLogURL, + MarkRevisionReady, + // When we're missing the OwnerRef for PodAutoscaler we see this update. 
+ MarkResourceNotOwned("PodAutoscaler", "missing-owners"), WithContainerStatuses([]v1.ContainerStatuses{{Name: "missing-owners"}})), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "InternalError", `revision: "missing-owners" does not own PodAutoscaler: "missing-owners"`), + }, + Key: "foo/missing-owners", + }, { + Name: "lost deployment owner ref", + WantErr: true, + Objects: []runtime.Object{ + rev("foo", "missing-owners", withK8sServiceName("youre-gonna-lose"), WithLogURL, + MarkRevisionReady, WithContainerStatuses([]v1.ContainerStatuses{{Name: "missing-owners"}})), + pa("foo", "missing-owners", WithTraffic), + noOwner(deploy(t, "foo", "missing-owners")), + image("foo", "missing-owners"), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: rev("foo", "missing-owners", withK8sServiceName("youre-gonna-lose"), WithLogURL, + MarkRevisionReady, + // When we're missing the OwnerRef for Deployment we see this update. + MarkResourceNotOwned("Deployment", "missing-owners-deployment"), WithContainerStatuses([]v1.ContainerStatuses{{Name: "missing-owners"}})), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "InternalError", `revision: "missing-owners" does not own Deployment: "missing-owners-deployment"`), + }, + Key: "foo/missing-owners", + }, { + Name: "image pull secrets", + // This test case tests that the image pull secrets from revision propagate to deployment and image + Objects: []runtime.Object{ + rev("foo", "image-pull-secrets", WithImagePullSecrets("foo-secret")), + }, + WantCreates: []runtime.Object{ + pa("foo", "image-pull-secrets"), + deployImagePullSecrets(deploy(t, "foo", "image-pull-secrets"), "foo-secret"), + imagePullSecrets(image("foo", "image-pull-secrets"), "foo-secret"), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: rev("foo", "image-pull-secrets", + WithImagePullSecrets("foo-secret"), + WithLogURL, AllUnknownConditions, MarkDeploying("Deploying"), 
WithContainerStatuses([]v1.ContainerStatuses{{Name: "image-pull-secrets"}})), + }}, + Key: "foo/image-pull-secrets", + }} table.Test(t, MakeFactory(func(ctx context.Context, listers *Listers, cmw configmap.Watcher) controller.Reconciler { r := &Reconciler{ @@ -633,6 +641,7 @@ func rev(namespace, name string, ro ...RevisionOption) *v1.Revision { Spec: v1.RevisionSpec{ PodSpec: corev1.PodSpec{ Containers: []corev1.Container{{ + Name: name, Image: "busybox", }}, }, @@ -685,7 +694,6 @@ func deploy(t *testing.T, namespace, name string, opts ...interface{}) *appsv1.D deployment, err := resources.MakeDeployment(rev, cfg.Logging, cfg.Tracing, cfg.Network, cfg.Observability, cfg.Autoscaler, cfg.Deployment, ) - if err != nil { t.Fatal("failed to create deployment") } @@ -698,7 +706,7 @@ func image(namespace, name string, co ...configOption) *caching.Image { opt(config) } - return resources.MakeImageCache(rev(namespace, name)) + return resources.MakeImageCache(rev(namespace, name), name, "") } func pa(namespace, name string, ko ...PodAutoscalerOption) *asv1a1.PodAutoscaler { diff --git a/pkg/testing/v1/revision.go b/pkg/testing/v1/revision.go index ead895150a33..2877a5fccf2b 100644 --- a/pkg/testing/v1/revision.go +++ b/pkg/testing/v1/revision.go @@ -183,9 +183,9 @@ func WithRevisionLabel(key, value string) RevisionOption { } } -// WithImageDigests sets the .Status.ImageDigests to the Revision. -func WithImageDigests(r *v1.Revision) { - r.Status.ContainerStatuses = []v1.ContainerStatuses{{ - Name: "user-container", - }} +// WithContainerStatuses sets the .Status.ContainerStatuses to the Revision. +func WithContainerStatuses(containerStatus []v1.ContainerStatuses) RevisionOption { + return func(r *v1.Revision) { + r.Status.ContainerStatuses = containerStatus + } }