From fffc03ef3d8aa272b62bf57474670a973ae9d571 Mon Sep 17 00:00:00 2001 From: savitaashture Date: Tue, 21 Apr 2020 15:46:39 +0530 Subject: [PATCH 1/5] Add ImageCache for multi container support --- pkg/reconciler/revision/cruds.go | 4 +- .../revision/reconcile_resources.go | 21 +- .../revision/resources/imagecache.go | 11 +- .../revision/resources/imagecache_test.go | 71 +++- pkg/reconciler/revision/revision_test.go | 36 +- pkg/reconciler/revision/table_test.go | 381 +++++++++--------- pkg/testing/v1/revision.go | 10 +- 7 files changed, 303 insertions(+), 231 deletions(-) diff --git a/pkg/reconciler/revision/cruds.go b/pkg/reconciler/revision/cruds.go index d11837b8124d..10cc4a8c418d 100644 --- a/pkg/reconciler/revision/cruds.go +++ b/pkg/reconciler/revision/cruds.go @@ -109,8 +109,8 @@ func (c *Reconciler) checkAndUpdateDeployment(ctx context.Context, rev *v1.Revis return d, nil } -func (c *Reconciler) createImageCache(ctx context.Context, rev *v1.Revision) (*caching.Image, error) { - image := resources.MakeImageCache(rev) +func (c *Reconciler) createImageCache(rev *v1.Revision, name string) (*caching.Image, error) { + image := resources.MakeImageCache(rev, name) return c.cachingclient.CachingV1alpha1().Images(image.Namespace).Create(image) } diff --git a/pkg/reconciler/revision/reconcile_resources.go b/pkg/reconciler/revision/reconcile_resources.go index 13360eae0cd6..35b9e96b9bac 100644 --- a/pkg/reconciler/revision/reconcile_resources.go +++ b/pkg/reconciler/revision/reconcile_resources.go @@ -26,6 +26,7 @@ import ( "k8s.io/apimachinery/pkg/api/equality" apierrs "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/kmeta" "knative.dev/pkg/logging" "knative.dev/pkg/logging/logkey" v1 "knative.dev/serving/pkg/apis/serving/v1" @@ -113,18 +114,18 @@ func (c *Reconciler) reconcileImageCache(ctx context.Context, rev *v1.Revision) logger := logging.FromContext(ctx) ns := rev.Namespace - imageName := 
resourcenames.ImageCache(rev) - _, err := c.imageLister.Images(ns).Get(imageName) - if apierrs.IsNotFound(err) { - _, err := c.createImageCache(ctx, rev) - if err != nil { - return fmt.Errorf("failed to create image cache %q: %w", imageName, err) + for _, container := range rev.Spec.Containers { + imageName := kmeta.ChildName(resourcenames.ImageCache(rev), "-"+container.Name) + if _, err := c.imageLister.Images(ns).Get(imageName); apierrs.IsNotFound(err) { + if _, err := c.createImageCache(rev, container.Name); err != nil { + return fmt.Errorf("failed to create image cache %q: %w", imageName, err) + } + logger.Infof("Created image cache %q", imageName) + } else if err != nil { + return fmt.Errorf("failed to get image cache %q: %w", imageName, err) } - logger.Infof("Created image cache %q", imageName) - } else if err != nil { - return fmt.Errorf("failed to get image cache %q: %w", imageName, err) - } + } return nil } diff --git a/pkg/reconciler/revision/resources/imagecache.go b/pkg/reconciler/revision/resources/imagecache.go index d2816b89fb73..2e75d2df1941 100644 --- a/pkg/reconciler/revision/resources/imagecache.go +++ b/pkg/reconciler/revision/resources/imagecache.go @@ -27,15 +27,20 @@ import ( ) // MakeImageCache makes an caching.Image resources from a revision. 
-func MakeImageCache(rev *v1.Revision) *caching.Image { - image := rev.Status.DeprecatedImageDigest +func MakeImageCache(rev *v1.Revision, name string) *caching.Image { + var image string + for _, digest := range rev.Status.ContainerStatuses { + if digest.Name == name { + image = digest.ImageDigest + } + } if image == "" { image = rev.Spec.GetContainer().Image } img := &caching.Image{ ObjectMeta: metav1.ObjectMeta{ - Name: names.ImageCache(rev), + Name: kmeta.ChildName(names.ImageCache(rev), "-"+name), Namespace: rev.Namespace, Labels: makeLabels(rev), Annotations: kmeta.FilterMap(rev.GetAnnotations(), func(k string) bool { diff --git a/pkg/reconciler/revision/resources/imagecache_test.go b/pkg/reconciler/revision/resources/imagecache_test.go index 7015476718cf..c5c60cb3447d 100644 --- a/pkg/reconciler/revision/resources/imagecache_test.go +++ b/pkg/reconciler/revision/resources/imagecache_test.go @@ -55,13 +55,78 @@ func TestMakeImageCache(t *testing.T) { }, }, Status: v1.RevisionStatus{ + ContainerStatuses: []v1.ContainerStatuses{{ + Name: "user-container", + ImageDigest: "busybox@sha256:deadbeef", + }}, + DeprecatedImageDigest: "busybox@sha256:deadbeef", + }, + }, + want: &caching.Image{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "bar-cache-user-container", + Labels: map[string]string{ + serving.RevisionLabelKey: "bar", + serving.RevisionUID: "1234", + AppLabelKey: "bar", + }, + Annotations: map[string]string{ + "a": "b", + }, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: v1.SchemeGroupVersion.String(), + Kind: "Revision", + Name: "bar", + UID: "1234", + Controller: ptr.Bool(true), + BlockOwnerDeletion: ptr.Bool(true), + }}, + }, + Spec: caching.ImageSpec{ + Image: "busybox@sha256:deadbeef", + }, + }, + }, { + name: "multiple containers", + rev: &v1.Revision{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "bar", + Annotations: map[string]string{ + "a": "b", + serving.RevisionLastPinnedAnnotationKey: "c", + }, + UID: 
"1234", + }, + Spec: v1.RevisionSpec{ + ContainerConcurrency: ptr.Int64(1), + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "ubuntu", + Ports: []corev1.ContainerPort{{ + ContainerPort: 80, + }}, + }, { + Image: "busybox", + }}, + }, + }, + Status: v1.RevisionStatus{ + ContainerStatuses: []v1.ContainerStatuses{{ + Name: "user-container1", + ImageDigest: "ubuntu@sha256:deadbeef1", + }, { + Name: "user-container", + ImageDigest: "busybox@sha256:deadbeef", + }}, DeprecatedImageDigest: "busybox@sha256:deadbeef", }, }, want: &caching.Image{ ObjectMeta: metav1.ObjectMeta{ Namespace: "foo", - Name: "bar-cache", + Name: "bar-cache-user-container", Labels: map[string]string{ serving.RevisionLabelKey: "bar", serving.RevisionUID: "1234", @@ -104,7 +169,7 @@ func TestMakeImageCache(t *testing.T) { want: &caching.Image{ ObjectMeta: metav1.ObjectMeta{ Namespace: "foo", - Name: "bar-cache", + Name: "bar-cache-user-container", Labels: map[string]string{ serving.RevisionLabelKey: "bar", serving.RevisionUID: "1234", @@ -129,7 +194,7 @@ func TestMakeImageCache(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - got := MakeImageCache(test.rev) + got := MakeImageCache(test.rev, "user-container") if diff := cmp.Diff(test.want, got); diff != "" { t.Errorf("MakeImageCache (-want, +got) = %v", diff) } diff --git a/pkg/reconciler/revision/revision_test.go b/pkg/reconciler/revision/revision_test.go index e385a9dab752..c4fe468b4740 100644 --- a/pkg/reconciler/revision/revision_test.go +++ b/pkg/reconciler/revision/revision_test.go @@ -111,7 +111,7 @@ func testReadyPA(rev *v1.Revision) *av1alpha1.PodAutoscaler { return pa } -func newTestControllerWithConfig(t *testing.T, deploymentConfig *deployment.Config, configs []*corev1.ConfigMap, opts ...reconcilerOption) ( +func newTestControllerWithConfig(t *testing.T, configs []*corev1.ConfigMap, opts ...reconcilerOption) ( context.Context, []controller.Informer, *controller.Impl, @@ -233,12 
+233,14 @@ func addResourcesToInformers(t *testing.T, ctx context.Context, rev *v1.Revision fakepainformer.Get(ctx).Informer().GetIndexer().Add(pa) } - imageName := resourcenames.ImageCache(rev) - image, err := fakecachingclient.Get(ctx).CachingV1alpha1().Images(rev.Namespace).Get(imageName, metav1.GetOptions{}) - if err != nil { - t.Errorf("Caching.Images.Get(%v) = %v", imageName, err) - } else { - fakeimageinformer.Get(ctx).Informer().GetIndexer().Add(image) + for _, v := range rev.Spec.Containers { + imageName := kmeta.ChildName(resourcenames.ImageCache(rev), "-"+v.Name) + image, err := fakecachingclient.Get(ctx).CachingV1alpha1().Images(rev.Namespace).Get(imageName, metav1.GetOptions{}) + if err != nil { + t.Errorf("Caching.Images.Get(%v) = %v", imageName, err) + } else { + fakeimageinformer.Get(ctx).Informer().GetIndexer().Add(image) + } } deploymentName := resourcenames.Deployment(rev) @@ -299,8 +301,7 @@ func TestResolutionFailed(t *testing.T) { // TODO(mattmoor): add coverage of a Reconcile fixing a stale logging URL func TestUpdateRevWithWithUpdatedLoggingURL(t *testing.T) { - deploymentConfig := getTestDeploymentConfig() - ctx, _, controller, watcher := newTestControllerWithConfig(t, deploymentConfig, []*corev1.ConfigMap{{ + ctx, _, controller, watcher := newTestControllerWithConfig(t, []*corev1.ConfigMap{{ ObjectMeta: metav1.ObjectMeta{ Namespace: system.Namespace(), Name: metrics.ConfigMapName(), @@ -341,16 +342,7 @@ func TestUpdateRevWithWithUpdatedLoggingURL(t *testing.T) { } func TestRevWithImageDigests(t *testing.T) { - deploymentConfig := getTestDeploymentConfig() - ctx, _, controller, _ := newTestControllerWithConfig(t, deploymentConfig, []*corev1.ConfigMap{{ - ObjectMeta: metav1.ObjectMeta{ - Name: config.DefaultsConfigName, - Namespace: system.Namespace(), - }, - Data: map[string]string{ - "container-name-template": "user-container", - }, - }}) + ctx, _, controller, _ := newTestControllerWithConfig(t, nil) rev := testRevision(corev1.PodSpec{ 
Containers: []corev1.Container{{ @@ -541,8 +533,7 @@ func TestGlobalResyncOnConfigMapUpdateRevision(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - controllerConfig := getTestDeploymentConfig() - ctx, informers, ctrl, watcher := newTestControllerWithConfig(t, controllerConfig, nil) + ctx, informers, ctrl, watcher := newTestControllerWithConfig(t, nil) ctx, cancel := context.WithCancel(ctx) grp := errgroup.Group{} @@ -678,8 +669,7 @@ func TestGlobalResyncOnConfigMapUpdateDeployment(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - controllerConfig := getTestDeploymentConfig() - ctx, informers, ctrl, watcher := newTestControllerWithConfig(t, controllerConfig, nil) + ctx, informers, ctrl, watcher := newTestControllerWithConfig(t, nil) ctx, cancel := context.WithCancel(ctx) grp := errgroup.Group{} diff --git a/pkg/reconciler/revision/table_test.go b/pkg/reconciler/revision/table_test.go index 077f60572198..db1ebbe06b72 100644 --- a/pkg/reconciler/revision/table_test.go +++ b/pkg/reconciler/revision/table_test.go @@ -18,6 +18,7 @@ package revision import ( "context" + "fmt" "testing" appsv1 "k8s.io/api/apps/v1" @@ -65,7 +66,7 @@ func TestReconcile(t *testing.T) { Name: "nop deletion reconcile", // Test that with a DeletionTimestamp we do nothing. Objects: []runtime.Object{ - rev("foo", "delete-pending", WithRevisionDeletionTimestamp), + rev("foo", "delete-pending", "delete-pending", WithRevisionDeletionTimestamp), }, Key: "foo/delete-pending", }, { @@ -74,7 +75,7 @@ func TestReconcile(t *testing.T) { // We feed in a well formed Revision where none of its sub-resources exist, // and we expect it to create them and initialize the Revision's status. Objects: []runtime.Object{ - rev("foo", "first-reconcile"), + rev("foo", "first-reconcile", "first-reconcile"), }, WantCreates: []runtime.Object{ // The first reconciliation of a Revision creates the following resources. 
@@ -83,9 +84,11 @@ func TestReconcile(t *testing.T) { image("foo", "first-reconcile"), }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - Object: rev("foo", "first-reconcile", + Object: rev("foo", "first-reconcile", "first-reconcile", // The first reconciliation Populates the following status properties. - WithLogURL, AllUnknownConditions, MarkDeploying("Deploying"), WithImageDigests), + WithLogURL, AllUnknownConditions, MarkDeploying("Deploying"), WithContainerStatuses([]v1.ContainerStatuses{ + {Name: "first-reconcile"}, + })), }}, Key: "foo/first-reconcile", }, { @@ -97,7 +100,7 @@ func TestReconcile(t *testing.T) { InduceFailure("update", "revisions"), }, Objects: []runtime.Object{ - rev("foo", "update-status-failure"), + rev("foo", "update-status-failure", "update-status-failure"), pa("foo", "update-status-failure"), }, WantCreates: []runtime.Object{ @@ -106,9 +109,11 @@ func TestReconcile(t *testing.T) { image("foo", "update-status-failure"), }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - Object: rev("foo", "update-status-failure", + Object: rev("foo", "update-status-failure", "update-status-failure", // Despite failure, the following status properties are set. - WithLogURL, AllUnknownConditions, MarkDeploying("Deploying"), WithImageDigests), + WithLogURL, AllUnknownConditions, MarkDeploying("Deploying"), WithContainerStatuses([]v1.ContainerStatuses{ + {Name: "update-status-failure"}, + })), }}, WantEvents: []string{ Eventf(corev1.EventTypeWarning, "UpdateFailed", "Failed to update status for %q: %v", @@ -124,7 +129,7 @@ func TestReconcile(t *testing.T) { InduceFailure("create", "podautoscalers"), }, Objects: []runtime.Object{ - rev("foo", "create-pa-failure"), + rev("foo", "create-pa-failure", "create-pa-failure"), }, WantCreates: []runtime.Object{ // We still see the following creates before the failure is induced. 
@@ -133,10 +138,10 @@ func TestReconcile(t *testing.T) { image("foo", "create-pa-failure"), }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - Object: rev("foo", "create-pa-failure", + Object: rev("foo", "create-pa-failure", "create-pa-failure", // Despite failure, the following status properties are set. WithLogURL, WithInitRevConditions, - MarkDeploying("Deploying"), WithImageDigests), + MarkDeploying("Deploying"), WithContainerStatuses([]v1.ContainerStatuses{{Name: "create-pa-failure"}})), }}, WantEvents: []string{ Eventf(corev1.EventTypeWarning, "InternalError", `failed to create PA "create-pa-failure": inducing failure for create podautoscalers`), @@ -151,7 +156,7 @@ func TestReconcile(t *testing.T) { InduceFailure("create", "deployments"), }, Objects: []runtime.Object{ - rev("foo", "create-user-deploy-failure"), + rev("foo", "create-user-deploy-failure", "create-user-deploy-failure"), pa("foo", "create-user-deploy-failure"), }, WantCreates: []runtime.Object{ @@ -159,10 +164,10 @@ func TestReconcile(t *testing.T) { deploy(t, "foo", "create-user-deploy-failure"), }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - Object: rev("foo", "create-user-deploy-failure", + Object: rev("foo", "create-user-deploy-failure", "create-user-deploy-failure", // Despite failure, the following status properties are set. WithLogURL, WithInitRevConditions, - MarkDeploying("Deploying"), WithImageDigests), + MarkDeploying("Deploying"), WithContainerStatuses([]v1.ContainerStatuses{{Name: "create-user-deploy-failure"}})), }}, WantEvents: []string{ Eventf(corev1.EventTypeWarning, "InternalError", @@ -176,7 +181,8 @@ func TestReconcile(t *testing.T) { // state (immediately post-creation), and verify that no changes // are necessary. 
Objects: []runtime.Object{ - rev("foo", "stable-reconcile", WithLogURL, AllUnknownConditions, WithImageDigests), + rev("foo", "stable-reconcile", "stable-reconcile", WithLogURL, AllUnknownConditions, + WithContainerStatuses([]v1.ContainerStatuses{{Name: "stable-reconcile"}})), pa("foo", "stable-reconcile", WithReachability(asv1a1.ReachabilityUnknown)), deploy(t, "foo", "stable-reconcile"), @@ -189,8 +195,8 @@ func TestReconcile(t *testing.T) { // Test that we update a deployment with new containers when they disagree // with our desired spec. Objects: []runtime.Object{ - rev("foo", "fix-containers", - WithLogURL, AllUnknownConditions, WithImageDigests), + rev("foo", "fix-containers", "fix-containers", + WithLogURL, AllUnknownConditions, WithContainerStatuses([]v1.ContainerStatuses{{Name: "fix-containers"}})), pa("foo", "fix-containers", WithReachability(asv1a1.ReachabilityUnknown)), changeContainers(deploy(t, "foo", "fix-containers")), image("foo", "fix-containers"), @@ -207,8 +213,8 @@ func TestReconcile(t *testing.T) { InduceFailure("update", "deployments"), }, Objects: []runtime.Object{ - rev("foo", "failure-update-deploy", - withK8sServiceName("whateves"), WithLogURL, AllUnknownConditions, WithImageDigests), + rev("foo", "failure-update-deploy", "failure-update-deploy", + withK8sServiceName("whateves"), WithLogURL, AllUnknownConditions, WithContainerStatuses([]v1.ContainerStatuses{{Name: "failure-update-deploy"}})), pa("foo", "failure-update-deploy"), changeContainers(deploy(t, "foo", "failure-update-deploy")), image("foo", "failure-update-deploy"), @@ -227,9 +233,9 @@ func TestReconcile(t *testing.T) { // We feed in a Revision and the resources it controls in a steady // state (port-Reserve), and verify that no changes are necessary. 
Objects: []runtime.Object{ - rev("foo", "stable-deactivation", + rev("foo", "stable-deactivation", "stable-deactivation", WithLogURL, MarkRevisionReady, - MarkInactive("NoTraffic", "This thing is inactive."), WithImageDigests), + MarkInactive("NoTraffic", "This thing is inactive."), WithContainerStatuses([]v1.ContainerStatuses{{Name: "stable-deactivation"}})), pa("foo", "stable-deactivation", WithNoTraffic("NoTraffic", "This thing is inactive.")), deploy(t, "foo", "stable-deactivation"), @@ -239,18 +245,18 @@ func TestReconcile(t *testing.T) { }, { Name: "pa is ready", Objects: []runtime.Object{ - rev("foo", "pa-ready", + rev("foo", "pa-ready", "pa-ready", withK8sServiceName("old-stuff"), WithLogURL, AllUnknownConditions), pa("foo", "pa-ready", WithTraffic, WithPAStatusService("new-stuff"), WithReachability(asv1a1.ReachabilityUnknown)), deploy(t, "foo", "pa-ready"), image("foo", "pa-ready"), }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - Object: rev("foo", "pa-ready", withK8sServiceName("new-stuff"), + Object: rev("foo", "pa-ready", "pa-ready", withK8sServiceName("new-stuff"), WithLogURL, // When the endpoint and pa are ready, then we will see the // Revision become ready. - MarkRevisionReady, WithImageDigests), + MarkRevisionReady, WithContainerStatuses([]v1.ContainerStatuses{{Name: "pa-ready"}})), }}, WantEvents: []string{ Eventf(corev1.EventTypeNormal, "RevisionReady", "Revision becomes ready upon all resources being ready"), @@ -260,7 +266,7 @@ func TestReconcile(t *testing.T) { Name: "pa not ready", // Test propagating the pa not ready status to the Revision. 
Objects: []runtime.Object{ - rev("foo", "pa-not-ready", + rev("foo", "pa-not-ready", "pa-not-ready", withK8sServiceName("somebody-told-me"), WithLogURL, MarkRevisionReady), pa("foo", "pa-not-ready", @@ -270,8 +276,8 @@ func TestReconcile(t *testing.T) { image("foo", "pa-not-ready"), }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - Object: rev("foo", "pa-not-ready", - WithLogURL, MarkRevisionReady, WithImageDigests, + Object: rev("foo", "pa-not-ready", "pa-not-ready", + WithLogURL, MarkRevisionReady, WithContainerStatuses([]v1.ContainerStatuses{{Name: "pa-not-ready"}}), withK8sServiceName("its-not-confidential"), // When we reconcile a ready state and our pa is in an activating // state, we should see the following mutation. @@ -283,7 +289,7 @@ func TestReconcile(t *testing.T) { Name: "pa inactive", // Test propagating the inactivity signal from the pa to the Revision. Objects: []runtime.Object{ - rev("foo", "pa-inactive", + rev("foo", "pa-inactive", "pa-inactive", withK8sServiceName("something-in-the-way"), WithLogURL, MarkRevisionReady), pa("foo", "pa-inactive", WithNoTraffic("NoTraffic", "This thing is inactive.")), @@ -291,8 +297,8 @@ func TestReconcile(t *testing.T) { image("foo", "pa-inactive"), }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - Object: rev("foo", "pa-inactive", - WithLogURL, MarkRevisionReady, WithImageDigests, + Object: rev("foo", "pa-inactive", "pa-inactive", + WithLogURL, MarkRevisionReady, WithContainerStatuses([]v1.ContainerStatuses{{Name: "pa-inactive"}}), // When we reconcile an "all ready" revision when the PA // is inactive, we should see the following change. MarkInactive("NoTraffic", "This thing is inactive.")), @@ -303,7 +309,7 @@ func TestReconcile(t *testing.T) { // Test propagating the inactivity signal from the pa to the Revision. // But propagate the service name. 
Objects: []runtime.Object{ - rev("foo", "pa-inactive", + rev("foo", "pa-inactive", "pa-inactive", withK8sServiceName("here-comes-the-sun"), WithLogURL, MarkRevisionReady), pa("foo", "pa-inactive", WithNoTraffic("NoTraffic", "This thing is inactive."), @@ -312,12 +318,12 @@ func TestReconcile(t *testing.T) { image("foo", "pa-inactive"), }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - Object: rev("foo", "pa-inactive", + Object: rev("foo", "pa-inactive", "pa-inactive", WithLogURL, MarkRevisionReady, withK8sServiceName("pa-inactive-svc"), // When we reconcile an "all ready" revision when the PA // is inactive, we should see the following change. - MarkInactive("NoTraffic", "This thing is inactive."), WithImageDigests), + MarkInactive("NoTraffic", "This thing is inactive."), WithContainerStatuses([]v1.ContainerStatuses{{Name: "pa-inactive"}})), }}, Key: "foo/pa-inactive", }, { @@ -326,7 +332,7 @@ func TestReconcile(t *testing.T) { // we bring it back to the required shape. // Protocol type is the only thing that can be changed on PA Objects: []runtime.Object{ - rev("foo", "fix-mutated-pa", + rev("foo", "fix-mutated-pa", "fix-mutated-pa", withK8sServiceName("ill-follow-the-sun"), WithLogURL, MarkRevisionReady), pa("foo", "fix-mutated-pa", WithProtocolType(networking.ProtocolH2C), WithTraffic, WithPAStatusService("fix-mutated-pa")), @@ -334,11 +340,11 @@ func TestReconcile(t *testing.T) { image("foo", "fix-mutated-pa"), }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - Object: rev("foo", "fix-mutated-pa", + Object: rev("foo", "fix-mutated-pa", "fix-mutated-pa", WithLogURL, AllUnknownConditions, // When our reconciliation has to change the service // we should see the following mutations to status. 
- withK8sServiceName("fix-mutated-pa"), WithLogURL, MarkRevisionReady, WithImageDigests), + withK8sServiceName("fix-mutated-pa"), WithLogURL, MarkRevisionReady, WithContainerStatuses([]v1.ContainerStatuses{{Name: "fix-mutated-pa"}})), }}, WantUpdates: []clientgotesting.UpdateActionImpl{{ Object: pa("foo", "fix-mutated-pa", WithTraffic, @@ -349,9 +355,9 @@ func TestReconcile(t *testing.T) { Name: "mutated pa gets error during the fix", // Same as above, but will fail during the update. Objects: []runtime.Object{ - rev("foo", "fix-mutated-pa-fail", + rev("foo", "fix-mutated-pa-fail", "fix-mutated-pa-fail", withK8sServiceName("some-old-stuff"), - WithLogURL, AllUnknownConditions, WithImageDigests), + WithLogURL, AllUnknownConditions, WithContainerStatuses([]v1.ContainerStatuses{{Name: "fix-mutated-pa-fail"}})), pa("foo", "fix-mutated-pa-fail", WithProtocolType(networking.ProtocolH2C), WithReachability(asv1a1.ReachabilityUnknown)), deploy(t, "foo", "fix-mutated-pa-fail"), image("foo", "fix-mutated-pa-fail"), @@ -375,18 +381,18 @@ func TestReconcile(t *testing.T) { // condition. It then verifies that Reconcile propagates this into the // status of the Revision. Objects: []runtime.Object{ - rev("foo", "deploy-timeout", + rev("foo", "deploy-timeout", "deploy-timeout", withK8sServiceName("the-taxman"), WithLogURL, MarkActive), pa("foo", "deploy-timeout"), // pa can't be ready since deployment times out. timeoutDeploy(deploy(t, "foo", "deploy-timeout"), "I timed out!"), image("foo", "deploy-timeout"), }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - Object: rev("foo", "deploy-timeout", + Object: rev("foo", "deploy-timeout", "deploy-timeout", WithLogURL, AllUnknownConditions, // When the revision is reconciled after a Deployment has // timed out, we should see it marked with the PDE state. 
- MarkProgressDeadlineExceeded("I timed out!"), WithImageDigests), + MarkProgressDeadlineExceeded("I timed out!"), WithContainerStatuses([]v1.ContainerStatuses{{Name: "deploy-timeout"}})), }}, Key: "foo/deploy-timeout", }, { @@ -396,158 +402,161 @@ func TestReconcile(t *testing.T) { // but changes the user deployment to have a FailedCreate condition. // It then verifies that Reconcile propagates this into the status of the Revision. Objects: []runtime.Object{ - rev("foo", "deploy-replica-failure", + rev("foo", "deploy-replica-failure", "deploy-replica-failure", withK8sServiceName("the-taxman"), WithLogURL, MarkActive), pa("foo", "deploy-replica-failure"), replicaFailureDeploy(deploy(t, "foo", "deploy-replica-failure"), "I replica failed!"), image("foo", "deploy-replica-failure"), }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - Object: rev("foo", "deploy-replica-failure", + Object: rev("foo", "deploy-replica-failure", "deploy-replica-failure", WithLogURL, AllUnknownConditions, // When the revision is reconciled after a Deployment has // timed out, we should see it marked with the FailedCreate state. - MarkResourcesUnavailable("FailedCreate", "I replica failed!"), WithImageDigests), + MarkResourcesUnavailable("FailedCreate", "I replica failed!"), WithContainerStatuses([]v1.ContainerStatuses{{Name: "deploy-replica-failure"}})), }}, Key: "foo/deploy-replica-failure", - }, { - Name: "surface ImagePullBackoff", - // Test the propagation of ImagePullBackoff from user container. - Objects: []runtime.Object{ - rev("foo", "pull-backoff", - withK8sServiceName("the-taxman"), WithLogURL, MarkActivating("Deploying", "")), - pa("foo", "pull-backoff", WithReachability(asv1a1.ReachabilityUnknown)), // pa can't be ready since deployment times out. 
- pod(t, "foo", "pull-backoff", WithWaitingContainer("user-container", "ImagePullBackoff", "can't pull it")), - timeoutDeploy(deploy(t, "foo", "pull-backoff"), "Timed out!"), - image("foo", "pull-backoff"), - }, - WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - Object: rev("foo", "pull-backoff", - WithLogURL, AllUnknownConditions, - MarkResourcesUnavailable("ImagePullBackoff", "can't pull it"), WithImageDigests), - }}, - WantUpdates: []clientgotesting.UpdateActionImpl{{ - Object: pa("foo", "pull-backoff", WithReachability(asv1a1.ReachabilityUnreachable)), - }}, - Key: "foo/pull-backoff", - }, { - Name: "surface pod errors", - // Test the propagation of the termination state of a Pod into the revision. - // This initializes the world to the stable state after its first reconcile, - // but changes the user deployment to have a failing pod. It then verifies - // that Reconcile propagates this into the status of the Revision. - Objects: []runtime.Object{ - rev("foo", "pod-error", - withK8sServiceName("a-pod-error"), WithLogURL, AllUnknownConditions, MarkActive), - pa("foo", "pod-error"), // PA can't be ready, since no traffic. - pod(t, "foo", "pod-error", WithFailingContainer("user-container", 5, "I failed man!")), - deploy(t, "foo", "pod-error"), - image("foo", "pod-error"), - }, - WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - Object: rev("foo", "pod-error", - WithLogURL, AllUnknownConditions, MarkContainerExiting(5, v1.RevisionContainerExitingMessage("I failed man!")), WithImageDigests), - }}, - Key: "foo/pod-error", - }, { - Name: "surface pod schedule errors", - // Test the propagation of the scheduling errors of Pod into the revision. - // This initializes the world to unschedule pod. It then verifies - // that Reconcile propagates this into the status of the Revision. 
- Objects: []runtime.Object{ - rev("foo", "pod-schedule-error", - withK8sServiceName("a-pod-schedule-error"), WithLogURL, AllUnknownConditions, MarkActive), - pa("foo", "pod-schedule-error"), // PA can't be ready, since no traffic. - pod(t, "foo", "pod-schedule-error", WithUnschedulableContainer("Insufficient energy", "Unschedulable")), - deploy(t, "foo", "pod-schedule-error"), - image("foo", "pod-schedule-error"), - }, - WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - Object: rev("foo", "pod-schedule-error", - WithLogURL, AllUnknownConditions, MarkResourcesUnavailable("Insufficient energy", "Unschedulable"), WithImageDigests), - }}, - Key: "foo/pod-schedule-error", - }, { - Name: "ready steady state", - // Test the transition that Reconcile makes when Endpoints become ready on the - // SKS owned services, which is signalled by pa having service name. - // This puts the world into the stable post-reconcile state for an Active - // Revision. It then creates an Endpoints resource with active subsets. - // This signal should make our Reconcile mark the Revision as Ready. 
- Objects: []runtime.Object{ - rev("foo", "steady-ready", withK8sServiceName("very-steady"), WithLogURL), - pa("foo", "steady-ready", WithTraffic, WithPAStatusService("steadier-even")), - deploy(t, "foo", "steady-ready"), - image("foo", "steady-ready"), - }, - WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - Object: rev("foo", "steady-ready", withK8sServiceName("steadier-even"), WithLogURL, - // All resources are ready to go, we should see the revision being - // marked ready - MarkRevisionReady, WithImageDigests), - }}, - WantEvents: []string{ - Eventf(corev1.EventTypeNormal, "RevisionReady", "Revision becomes ready upon all resources being ready"), - }, - Key: "foo/steady-ready", - }, { - Name: "lost pa owner ref", - WantErr: true, - Objects: []runtime.Object{ - rev("foo", "missing-owners", withK8sServiceName("lesser-revision"), WithLogURL, - MarkRevisionReady), - pa("foo", "missing-owners", WithTraffic, WithPodAutoscalerOwnersRemoved), - deploy(t, "foo", "missing-owners"), - image("foo", "missing-owners"), - }, - WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - Object: rev("foo", "missing-owners", withK8sServiceName("lesser-revision"), WithLogURL, - MarkRevisionReady, - // When we're missing the OwnerRef for PodAutoscaler we see this update. 
- MarkResourceNotOwned("PodAutoscaler", "missing-owners"), WithImageDigests), - }}, - WantEvents: []string{ - Eventf(corev1.EventTypeWarning, "InternalError", `revision: "missing-owners" does not own PodAutoscaler: "missing-owners"`), - }, - Key: "foo/missing-owners", - }, { - Name: "lost deployment owner ref", - WantErr: true, - Objects: []runtime.Object{ - rev("foo", "missing-owners", withK8sServiceName("youre-gonna-lose"), WithLogURL, - MarkRevisionReady, WithImageDigests), - pa("foo", "missing-owners", WithTraffic), - noOwner(deploy(t, "foo", "missing-owners")), - image("foo", "missing-owners"), - }, - WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - Object: rev("foo", "missing-owners", withK8sServiceName("youre-gonna-lose"), WithLogURL, - MarkRevisionReady, - // When we're missing the OwnerRef for Deployment we see this update. - MarkResourceNotOwned("Deployment", "missing-owners-deployment"), WithImageDigests), - }}, - WantEvents: []string{ - Eventf(corev1.EventTypeWarning, "InternalError", `revision: "missing-owners" does not own Deployment: "missing-owners-deployment"`), - }, - Key: "foo/missing-owners", - }, { - Name: "image pull secrets", - // This test case tests that the image pull secrets from revision propagate to deployment and image - Objects: []runtime.Object{ - rev("foo", "image-pull-secrets", WithImagePullSecrets("foo-secret")), - }, - WantCreates: []runtime.Object{ - pa("foo", "image-pull-secrets"), - deployImagePullSecrets(deploy(t, "foo", "image-pull-secrets"), "foo-secret"), - imagePullSecrets(image("foo", "image-pull-secrets"), "foo-secret"), - }, - WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - Object: rev("foo", "image-pull-secrets", - WithImagePullSecrets("foo-secret"), - WithLogURL, AllUnknownConditions, MarkDeploying("Deploying"), WithImageDigests), - }}, - Key: "foo/image-pull-secrets", - }} + }, + { + Name: "surface ImagePullBackoff", + // Test the propagation of ImagePullBackoff from user container. 
+ Objects: []runtime.Object{ + rev("foo", "pull-backoff", "pull-backoff", + withK8sServiceName("the-taxman"), WithLogURL, MarkActivating("Deploying", ""), WithContainerStatuses([]v1.ContainerStatuses{{Name: "pull-backoff"}})), + pa("foo", "pull-backoff", WithReachability(asv1a1.ReachabilityUnknown)), // pa can't be ready since deployment times out. + pod(t, "foo", "pull-backoff", WithWaitingContainer("pull-backoff", "ImagePullBackoff", "can't pull it")), + timeoutDeploy(deploy(t, "foo", "pull-backoff"), "Timed out!"), + image("foo", "pull-backoff"), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: rev("foo", "pull-backoff", "pull-backoff", + WithLogURL, AllUnknownConditions, + MarkResourcesUnavailable("ImagePullBackoff", "can't pull it"), WithContainerStatuses([]v1.ContainerStatuses{{Name: "pull-backoff"}})), + }}, + WantUpdates: []clientgotesting.UpdateActionImpl{{ + Object: pa("foo", "pull-backoff", WithReachability(asv1a1.ReachabilityUnreachable)), + }}, + Key: "foo/pull-backoff", + }, { + Name: "surface pod errors", + // Test the propagation of the termination state of a Pod into the revision. + // This initializes the world to the stable state after its first reconcile, + // but changes the user deployment to have a failing pod. It then verifies + // that Reconcile propagates this into the status of the Revision. + Objects: []runtime.Object{ + rev("foo", "pod-error", "pod-error", + withK8sServiceName("a-pod-error"), WithLogURL, AllUnknownConditions, MarkActive), + pa("foo", "pod-error"), // PA can't be ready, since no traffic. 
+ pod(t, "foo", "pod-error", WithFailingContainer("pod-error", 5, "I failed man!")), + deploy(t, "foo", "pod-error"), + image("foo", "pod-error"), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: rev("foo", "pod-error", "pod-error", + WithLogURL, AllUnknownConditions, MarkContainerExiting(5, + v1.RevisionContainerExitingMessage("I failed man!")), WithContainerStatuses([]v1.ContainerStatuses{{Name: "pod-error"}})), + }}, + Key: "foo/pod-error", + }, { + Name: "surface pod schedule errors", + // Test the propagation of the scheduling errors of Pod into the revision. + // This initializes the world to unschedule pod. It then verifies + // that Reconcile propagates this into the status of the Revision. + Objects: []runtime.Object{ + rev("foo", "pod-schedule-error", "pod-schedule-error", + withK8sServiceName("a-pod-schedule-error"), WithLogURL, AllUnknownConditions, MarkActive), + pa("foo", "pod-schedule-error"), // PA can't be ready, since no traffic. + pod(t, "foo", "pod-schedule-error", WithUnschedulableContainer("Insufficient energy", "Unschedulable")), + deploy(t, "foo", "pod-schedule-error"), + image("foo", "pod-schedule-error"), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: rev("foo", "pod-schedule-error", "pod-schedule-error", + WithLogURL, AllUnknownConditions, MarkResourcesUnavailable("Insufficient energy", + "Unschedulable"), WithContainerStatuses([]v1.ContainerStatuses{{Name: "pod-schedule-error"}})), + }}, + Key: "foo/pod-schedule-error", + }, { + Name: "ready steady state", + // Test the transition that Reconcile makes when Endpoints become ready on the + // SKS owned services, which is signalled by pa having service name. + // This puts the world into the stable post-reconcile state for an Active + // Revision. It then creates an Endpoints resource with active subsets. + // This signal should make our Reconcile mark the Revision as Ready. 
+ Objects: []runtime.Object{ + rev("foo", "steady-ready", "steady-ready", withK8sServiceName("very-steady"), WithLogURL), + pa("foo", "steady-ready", WithTraffic, WithPAStatusService("steadier-even")), + deploy(t, "foo", "steady-ready"), + image("foo", "steady-ready"), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: rev("foo", "steady-ready", "steady-ready", withK8sServiceName("steadier-even"), WithLogURL, + // All resources are ready to go, we should see the revision being + // marked ready + MarkRevisionReady, WithContainerStatuses([]v1.ContainerStatuses{{Name: "steady-ready"}})), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "RevisionReady", "Revision becomes ready upon all resources being ready"), + }, + Key: "foo/steady-ready", + }, { + Name: "lost pa owner ref", + WantErr: true, + Objects: []runtime.Object{ + rev("foo", "missing-owners", "missing-owners", withK8sServiceName("lesser-revision"), WithLogURL, + MarkRevisionReady), + pa("foo", "missing-owners", WithTraffic, WithPodAutoscalerOwnersRemoved), + deploy(t, "foo", "missing-owners"), + image("foo", "missing-owners"), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: rev("foo", "missing-owners", "missing-owners", withK8sServiceName("lesser-revision"), WithLogURL, + MarkRevisionReady, + // When we're missing the OwnerRef for PodAutoscaler we see this update. 
+ MarkResourceNotOwned("PodAutoscaler", "missing-owners"), WithContainerStatuses([]v1.ContainerStatuses{{Name: "missing-owners"}})), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "InternalError", `revision: "missing-owners" does not own PodAutoscaler: "missing-owners"`), + }, + Key: "foo/missing-owners", + }, { + Name: "lost deployment owner ref", + WantErr: true, + Objects: []runtime.Object{ + rev("foo", "missing-owners", "missing-owners", withK8sServiceName("youre-gonna-lose"), WithLogURL, + MarkRevisionReady, WithContainerStatuses([]v1.ContainerStatuses{{Name: "missing-owners"}})), + pa("foo", "missing-owners", WithTraffic), + noOwner(deploy(t, "foo", "missing-owners")), + image("foo", "missing-owners"), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: rev("foo", "missing-owners", "missing-owners", withK8sServiceName("youre-gonna-lose"), WithLogURL, + MarkRevisionReady, + // When we're missing the OwnerRef for Deployment we see this update. + MarkResourceNotOwned("Deployment", "missing-owners-deployment"), WithContainerStatuses([]v1.ContainerStatuses{{Name: "missing-owners"}})), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "InternalError", `revision: "missing-owners" does not own Deployment: "missing-owners-deployment"`), + }, + Key: "foo/missing-owners", + }, { + Name: "image pull secrets", + // This test case tests that the image pull secrets from revision propagate to deployment and image + Objects: []runtime.Object{ + rev("foo", "image-pull-secrets", "image-pull-secrets", WithImagePullSecrets("foo-secret")), + }, + WantCreates: []runtime.Object{ + pa("foo", "image-pull-secrets"), + deployImagePullSecrets(deploy(t, "foo", "image-pull-secrets"), "foo-secret"), + imagePullSecrets(image("foo", "image-pull-secrets"), "foo-secret"), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: rev("foo", "image-pull-secrets", "image-pull-secrets", + WithImagePullSecrets("foo-secret"), + 
WithLogURL, AllUnknownConditions, MarkDeploying("Deploying"), WithContainerStatuses([]v1.ContainerStatuses{{Name: "image-pull-secrets"}})), + }}, + Key: "foo/image-pull-secrets", + }} table.Test(t, MakeFactory(func(ctx context.Context, listers *Listers, cmw configmap.Watcher) controller.Reconciler { r := &Reconciler{ @@ -623,7 +632,7 @@ func changeContainers(deploy *appsv1.Deployment) *appsv1.Deployment { return deploy } -func rev(namespace, name string, ro ...RevisionOption) *v1.Revision { +func rev(namespace, name, containerName string, ro ...RevisionOption) *v1.Revision { r := &v1.Revision{ ObjectMeta: metav1.ObjectMeta{ Name: name, @@ -633,6 +642,7 @@ func rev(namespace, name string, ro ...RevisionOption) *v1.Revision { Spec: v1.RevisionSpec{ PodSpec: corev1.PodSpec{ Containers: []corev1.Container{{ + Name: containerName, Image: "busybox", }}, }, @@ -671,7 +681,7 @@ func deploy(t *testing.T, namespace, name string, opts ...interface{}) *appsv1.D } } - rev := rev(namespace, name) + rev := rev(namespace, name, name) for _, opt := range opts { if revOpt, ok := opt.(RevisionOption); ok { @@ -686,6 +696,7 @@ func deploy(t *testing.T, namespace, name string, opts ...interface{}) *appsv1.D cfg.Observability, cfg.Autoscaler, cfg.Deployment, ) + fmt.Println("errerr", err) if err != nil { t.Fatal("failed to create deployment") } @@ -698,11 +709,11 @@ func image(namespace, name string, co ...configOption) *caching.Image { opt(config) } - return resources.MakeImageCache(rev(namespace, name)) + return resources.MakeImageCache(rev(namespace, name, name), name) } func pa(namespace, name string, ko ...PodAutoscalerOption) *asv1a1.PodAutoscaler { - rev := rev(namespace, name) + rev := rev(namespace, name, name) k := resources.MakePA(rev) for _, opt := range ko { diff --git a/pkg/testing/v1/revision.go b/pkg/testing/v1/revision.go index ead895150a33..2877a5fccf2b 100644 --- a/pkg/testing/v1/revision.go +++ b/pkg/testing/v1/revision.go @@ -183,9 +183,9 @@ func 
WithRevisionLabel(key, value string) RevisionOption { } } -// WithImageDigests sets the .Status.ImageDigests to the Revision. -func WithImageDigests(r *v1.Revision) { - r.Status.ContainerStatuses = []v1.ContainerStatuses{{ - Name: "user-container", - }} +// WithContainerStatuses sets the .Status.ContainerStatuses to the Revision. +func WithContainerStatuses(containerStatus []v1.ContainerStatuses) RevisionOption { + return func(r *v1.Revision) { + r.Status.ContainerStatuses = containerStatus + } } From 3d476b31c3de1f6818257a603b656a65e1d56c32 Mon Sep 17 00:00:00 2001 From: savitaashture Date: Tue, 21 Apr 2020 20:34:38 +0530 Subject: [PATCH 2/5] Fix review comments --- pkg/reconciler/revision/cruds.go | 4 +- .../revision/reconcile_resources.go | 4 +- .../revision/resources/imagecache.go | 17 ++-- .../revision/resources/imagecache_test.go | 77 +++++++++++++++++-- pkg/reconciler/revision/table_test.go | 2 +- 5 files changed, 85 insertions(+), 19 deletions(-) diff --git a/pkg/reconciler/revision/cruds.go b/pkg/reconciler/revision/cruds.go index 10cc4a8c418d..d3dc4ed120d7 100644 --- a/pkg/reconciler/revision/cruds.go +++ b/pkg/reconciler/revision/cruds.go @@ -109,8 +109,8 @@ func (c *Reconciler) checkAndUpdateDeployment(ctx context.Context, rev *v1.Revis return d, nil } -func (c *Reconciler) createImageCache(rev *v1.Revision, name string) (*caching.Image, error) { - image := resources.MakeImageCache(rev, name) +func (c *Reconciler) createImageCache(rev *v1.Revision, containerName, imageDigest string) (*caching.Image, error) { + image := resources.MakeImageCache(rev, containerName, imageDigest) return c.cachingclient.CachingV1alpha1().Images(image.Namespace).Create(image) } diff --git a/pkg/reconciler/revision/reconcile_resources.go b/pkg/reconciler/revision/reconcile_resources.go index 35b9e96b9bac..28afea222381 100644 --- a/pkg/reconciler/revision/reconcile_resources.go +++ b/pkg/reconciler/revision/reconcile_resources.go @@ -114,10 +114,10 @@ func (c *Reconciler) 
reconcileImageCache(ctx context.Context, rev *v1.Revision) logger := logging.FromContext(ctx) ns := rev.Namespace - for _, container := range rev.Spec.Containers { + for _, container := range rev.Status.ContainerStatuses { imageName := kmeta.ChildName(resourcenames.ImageCache(rev), "-"+container.Name) if _, err := c.imageLister.Images(ns).Get(imageName); apierrs.IsNotFound(err) { - if _, err := c.createImageCache(rev, container.Name); err != nil { + if _, err := c.createImageCache(rev, container.Name, container.ImageDigest); err != nil { return fmt.Errorf("failed to create image cache %q: %w", imageName, err) } logger.Infof("Created image cache %q", imageName) diff --git a/pkg/reconciler/revision/resources/imagecache.go b/pkg/reconciler/revision/resources/imagecache.go index 2e75d2df1941..7ba8f7e5b022 100644 --- a/pkg/reconciler/revision/resources/imagecache.go +++ b/pkg/reconciler/revision/resources/imagecache.go @@ -27,20 +27,19 @@ import ( ) // MakeImageCache makes an caching.Image resources from a revision. -func MakeImageCache(rev *v1.Revision, name string) *caching.Image { - var image string - for _, digest := range rev.Status.ContainerStatuses { - if digest.Name == name { - image = digest.ImageDigest - } - } +func MakeImageCache(rev *v1.Revision, containerName, image string) *caching.Image { + // If the given image is empty then replace that with the image from spec.Container[*].Image. 
if image == "" { - image = rev.Spec.GetContainer().Image + for _, v := range rev.Spec.Containers { + if v.Name == containerName { + image = v.Image + } + } } img := &caching.Image{ ObjectMeta: metav1.ObjectMeta{ - Name: kmeta.ChildName(names.ImageCache(rev), "-"+name), + Name: kmeta.ChildName(names.ImageCache(rev), "-"+containerName), Namespace: rev.Namespace, Labels: makeLabels(rev), Annotations: kmeta.FilterMap(rev.GetAnnotations(), func(k string) bool { diff --git a/pkg/reconciler/revision/resources/imagecache_test.go b/pkg/reconciler/revision/resources/imagecache_test.go index c5c60cb3447d..90fd957468f0 100644 --- a/pkg/reconciler/revision/resources/imagecache_test.go +++ b/pkg/reconciler/revision/resources/imagecache_test.go @@ -31,9 +31,11 @@ import ( func TestMakeImageCache(t *testing.T) { tests := []struct { - name string - rev *v1.Revision - want *caching.Image + name string + rev *v1.Revision + containerName string + image string + want *caching.Image }{{ name: "simple container", rev: &v1.Revision{ @@ -62,6 +64,8 @@ func TestMakeImageCache(t *testing.T) { DeprecatedImageDigest: "busybox@sha256:deadbeef", }, }, + containerName: "user-container", + image: "busybox@sha256:deadbeef", want: &caching.Image{ ObjectMeta: metav1.ObjectMeta{ Namespace: "foo", @@ -123,6 +127,66 @@ func TestMakeImageCache(t *testing.T) { DeprecatedImageDigest: "busybox@sha256:deadbeef", }, }, + containerName: "user-container1", + image: "ubuntu@sha256:deadbeef1", + want: &caching.Image{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "bar-cache-user-container1", + Labels: map[string]string{ + serving.RevisionLabelKey: "bar", + serving.RevisionUID: "1234", + AppLabelKey: "bar", + }, + Annotations: map[string]string{ + "a": "b", + }, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: v1.SchemeGroupVersion.String(), + Kind: "Revision", + Name: "bar", + UID: "1234", + Controller: ptr.Bool(true), + BlockOwnerDeletion: ptr.Bool(true), + }}, + }, + Spec: 
caching.ImageSpec{ + Image: "ubuntu@sha256:deadbeef1", + }, + }, + }, { + name: "Image cache for multiple containers when image digests are empty", + rev: &v1.Revision{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "bar", + Annotations: map[string]string{ + "a": "b", + serving.RevisionLastPinnedAnnotationKey: "c", + }, + UID: "1234", + }, + Spec: v1.RevisionSpec{ + ContainerConcurrency: ptr.Int64(1), + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "user-container", + Image: "ubuntu", + Ports: []corev1.ContainerPort{{ + ContainerPort: 80, + }}, + }, { + Name: "user-container1", + Image: "busybox", + }}, + }, + }, + Status: v1.RevisionStatus{ + ContainerStatuses: []v1.ContainerStatuses{{}}, + }, + }, + containerName: "user-container", + image: "", want: &caching.Image{ ObjectMeta: metav1.ObjectMeta{ Namespace: "foo", @@ -145,7 +209,7 @@ func TestMakeImageCache(t *testing.T) { }}, }, Spec: caching.ImageSpec{ - Image: "busybox@sha256:deadbeef", + Image: "ubuntu", }, }, }, { @@ -161,11 +225,14 @@ func TestMakeImageCache(t *testing.T) { PodSpec: corev1.PodSpec{ ServiceAccountName: "privilegeless", Containers: []corev1.Container{{ + Name: "user-container", Image: "busybox", }}, }, }, }, + containerName: "user-container", + image: "", want: &caching.Image{ ObjectMeta: metav1.ObjectMeta{ Namespace: "foo", @@ -194,7 +261,7 @@ func TestMakeImageCache(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - got := MakeImageCache(test.rev, "user-container") + got := MakeImageCache(test.rev, test.containerName, test.image) if diff := cmp.Diff(test.want, got); diff != "" { t.Errorf("MakeImageCache (-want, +got) = %v", diff) } diff --git a/pkg/reconciler/revision/table_test.go b/pkg/reconciler/revision/table_test.go index db1ebbe06b72..2f5e424ef3f9 100644 --- a/pkg/reconciler/revision/table_test.go +++ b/pkg/reconciler/revision/table_test.go @@ -709,7 +709,7 @@ func image(namespace, name string, co ...configOption) 
*caching.Image { opt(config) } - return resources.MakeImageCache(rev(namespace, name, name), name) + return resources.MakeImageCache(rev(namespace, name, name), name, "") } func pa(namespace, name string, ko ...PodAutoscalerOption) *asv1a1.PodAutoscaler { From 7a885a206dd51741c2389dcbb20439be8796bfbe Mon Sep 17 00:00:00 2001 From: savitaashture Date: Fri, 24 Apr 2020 20:16:53 +0530 Subject: [PATCH 3/5] fix review comments --- pkg/reconciler/revision/table_test.go | 97 +++++++++++++-------------- 1 file changed, 47 insertions(+), 50 deletions(-) diff --git a/pkg/reconciler/revision/table_test.go b/pkg/reconciler/revision/table_test.go index 2f5e424ef3f9..4a12c0599a0e 100644 --- a/pkg/reconciler/revision/table_test.go +++ b/pkg/reconciler/revision/table_test.go @@ -18,7 +18,6 @@ package revision import ( "context" - "fmt" "testing" appsv1 "k8s.io/api/apps/v1" @@ -66,7 +65,7 @@ func TestReconcile(t *testing.T) { Name: "nop deletion reconcile", // Test that with a DeletionTimestamp we do nothing. Objects: []runtime.Object{ - rev("foo", "delete-pending", "delete-pending", WithRevisionDeletionTimestamp), + rev("foo", "delete-pending", WithRevisionDeletionTimestamp), }, Key: "foo/delete-pending", }, { @@ -75,7 +74,7 @@ func TestReconcile(t *testing.T) { // We feed in a well formed Revision where none of its sub-resources exist, // and we expect it to create them and initialize the Revision's status. Objects: []runtime.Object{ - rev("foo", "first-reconcile", "first-reconcile"), + rev("foo", "first-reconcile"), }, WantCreates: []runtime.Object{ // The first reconciliation of a Revision creates the following resources. @@ -84,7 +83,7 @@ func TestReconcile(t *testing.T) { image("foo", "first-reconcile"), }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - Object: rev("foo", "first-reconcile", "first-reconcile", + Object: rev("foo", "first-reconcile", // The first reconciliation Populates the following status properties. 
WithLogURL, AllUnknownConditions, MarkDeploying("Deploying"), WithContainerStatuses([]v1.ContainerStatuses{ {Name: "first-reconcile"}, @@ -100,7 +99,7 @@ func TestReconcile(t *testing.T) { InduceFailure("update", "revisions"), }, Objects: []runtime.Object{ - rev("foo", "update-status-failure", "update-status-failure"), + rev("foo", "update-status-failure"), pa("foo", "update-status-failure"), }, WantCreates: []runtime.Object{ @@ -109,7 +108,7 @@ func TestReconcile(t *testing.T) { image("foo", "update-status-failure"), }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - Object: rev("foo", "update-status-failure", "update-status-failure", + Object: rev("foo", "update-status-failure", // Despite failure, the following status properties are set. WithLogURL, AllUnknownConditions, MarkDeploying("Deploying"), WithContainerStatuses([]v1.ContainerStatuses{ {Name: "update-status-failure"}, @@ -129,7 +128,7 @@ func TestReconcile(t *testing.T) { InduceFailure("create", "podautoscalers"), }, Objects: []runtime.Object{ - rev("foo", "create-pa-failure", "create-pa-failure"), + rev("foo", "create-pa-failure"), }, WantCreates: []runtime.Object{ // We still see the following creates before the failure is induced. @@ -138,7 +137,7 @@ func TestReconcile(t *testing.T) { image("foo", "create-pa-failure"), }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - Object: rev("foo", "create-pa-failure", "create-pa-failure", + Object: rev("foo", "create-pa-failure", // Despite failure, the following status properties are set. 
WithLogURL, WithInitRevConditions, MarkDeploying("Deploying"), WithContainerStatuses([]v1.ContainerStatuses{{Name: "create-pa-failure"}})), @@ -156,7 +155,7 @@ func TestReconcile(t *testing.T) { InduceFailure("create", "deployments"), }, Objects: []runtime.Object{ - rev("foo", "create-user-deploy-failure", "create-user-deploy-failure"), + rev("foo", "create-user-deploy-failure"), pa("foo", "create-user-deploy-failure"), }, WantCreates: []runtime.Object{ @@ -164,7 +163,7 @@ func TestReconcile(t *testing.T) { deploy(t, "foo", "create-user-deploy-failure"), }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - Object: rev("foo", "create-user-deploy-failure", "create-user-deploy-failure", + Object: rev("foo", "create-user-deploy-failure", // Despite failure, the following status properties are set. WithLogURL, WithInitRevConditions, MarkDeploying("Deploying"), WithContainerStatuses([]v1.ContainerStatuses{{Name: "create-user-deploy-failure"}})), @@ -181,7 +180,7 @@ func TestReconcile(t *testing.T) { // state (immediately post-creation), and verify that no changes // are necessary. Objects: []runtime.Object{ - rev("foo", "stable-reconcile", "stable-reconcile", WithLogURL, AllUnknownConditions, + rev("foo", "stable-reconcile", WithLogURL, AllUnknownConditions, WithContainerStatuses([]v1.ContainerStatuses{{Name: "stable-reconcile"}})), pa("foo", "stable-reconcile", WithReachability(asv1a1.ReachabilityUnknown)), @@ -195,7 +194,7 @@ func TestReconcile(t *testing.T) { // Test that we update a deployment with new containers when they disagree // with our desired spec. 
Objects: []runtime.Object{ - rev("foo", "fix-containers", "fix-containers", + rev("foo", "fix-containers", WithLogURL, AllUnknownConditions, WithContainerStatuses([]v1.ContainerStatuses{{Name: "fix-containers"}})), pa("foo", "fix-containers", WithReachability(asv1a1.ReachabilityUnknown)), changeContainers(deploy(t, "foo", "fix-containers")), @@ -213,7 +212,7 @@ func TestReconcile(t *testing.T) { InduceFailure("update", "deployments"), }, Objects: []runtime.Object{ - rev("foo", "failure-update-deploy", "failure-update-deploy", + rev("foo", "failure-update-deploy", withK8sServiceName("whateves"), WithLogURL, AllUnknownConditions, WithContainerStatuses([]v1.ContainerStatuses{{Name: "failure-update-deploy"}})), pa("foo", "failure-update-deploy"), changeContainers(deploy(t, "foo", "failure-update-deploy")), @@ -233,7 +232,7 @@ func TestReconcile(t *testing.T) { // We feed in a Revision and the resources it controls in a steady // state (port-Reserve), and verify that no changes are necessary. 
Objects: []runtime.Object{ - rev("foo", "stable-deactivation", "stable-deactivation", + rev("foo", "stable-deactivation", WithLogURL, MarkRevisionReady, MarkInactive("NoTraffic", "This thing is inactive."), WithContainerStatuses([]v1.ContainerStatuses{{Name: "stable-deactivation"}})), pa("foo", "stable-deactivation", @@ -245,14 +244,14 @@ func TestReconcile(t *testing.T) { }, { Name: "pa is ready", Objects: []runtime.Object{ - rev("foo", "pa-ready", "pa-ready", + rev("foo", "pa-ready", withK8sServiceName("old-stuff"), WithLogURL, AllUnknownConditions), pa("foo", "pa-ready", WithTraffic, WithPAStatusService("new-stuff"), WithReachability(asv1a1.ReachabilityUnknown)), deploy(t, "foo", "pa-ready"), image("foo", "pa-ready"), }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - Object: rev("foo", "pa-ready", "pa-ready", withK8sServiceName("new-stuff"), + Object: rev("foo", "pa-ready", withK8sServiceName("new-stuff"), WithLogURL, // When the endpoint and pa are ready, then we will see the // Revision become ready. @@ -266,7 +265,7 @@ func TestReconcile(t *testing.T) { Name: "pa not ready", // Test propagating the pa not ready status to the Revision. Objects: []runtime.Object{ - rev("foo", "pa-not-ready", "pa-not-ready", + rev("foo", "pa-not-ready", withK8sServiceName("somebody-told-me"), WithLogURL, MarkRevisionReady), pa("foo", "pa-not-ready", @@ -276,7 +275,7 @@ func TestReconcile(t *testing.T) { image("foo", "pa-not-ready"), }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - Object: rev("foo", "pa-not-ready", "pa-not-ready", + Object: rev("foo", "pa-not-ready", WithLogURL, MarkRevisionReady, WithContainerStatuses([]v1.ContainerStatuses{{Name: "pa-not-ready"}}), withK8sServiceName("its-not-confidential"), // When we reconcile a ready state and our pa is in an activating @@ -289,7 +288,7 @@ func TestReconcile(t *testing.T) { Name: "pa inactive", // Test propagating the inactivity signal from the pa to the Revision. 
Objects: []runtime.Object{ - rev("foo", "pa-inactive", "pa-inactive", + rev("foo", "pa-inactive", withK8sServiceName("something-in-the-way"), WithLogURL, MarkRevisionReady), pa("foo", "pa-inactive", WithNoTraffic("NoTraffic", "This thing is inactive.")), @@ -297,7 +296,7 @@ func TestReconcile(t *testing.T) { image("foo", "pa-inactive"), }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - Object: rev("foo", "pa-inactive", "pa-inactive", + Object: rev("foo", "pa-inactive", WithLogURL, MarkRevisionReady, WithContainerStatuses([]v1.ContainerStatuses{{Name: "pa-inactive"}}), // When we reconcile an "all ready" revision when the PA // is inactive, we should see the following change. @@ -309,7 +308,7 @@ func TestReconcile(t *testing.T) { // Test propagating the inactivity signal from the pa to the Revision. // But propagate the service name. Objects: []runtime.Object{ - rev("foo", "pa-inactive", "pa-inactive", + rev("foo", "pa-inactive", withK8sServiceName("here-comes-the-sun"), WithLogURL, MarkRevisionReady), pa("foo", "pa-inactive", WithNoTraffic("NoTraffic", "This thing is inactive."), @@ -318,7 +317,7 @@ func TestReconcile(t *testing.T) { image("foo", "pa-inactive"), }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - Object: rev("foo", "pa-inactive", "pa-inactive", + Object: rev("foo", "pa-inactive", WithLogURL, MarkRevisionReady, withK8sServiceName("pa-inactive-svc"), // When we reconcile an "all ready" revision when the PA @@ -332,7 +331,7 @@ func TestReconcile(t *testing.T) { // we bring it back to the required shape. 
// Protocol type is the only thing that can be changed on PA Objects: []runtime.Object{ - rev("foo", "fix-mutated-pa", "fix-mutated-pa", + rev("foo", "fix-mutated-pa", withK8sServiceName("ill-follow-the-sun"), WithLogURL, MarkRevisionReady), pa("foo", "fix-mutated-pa", WithProtocolType(networking.ProtocolH2C), WithTraffic, WithPAStatusService("fix-mutated-pa")), @@ -340,7 +339,7 @@ func TestReconcile(t *testing.T) { image("foo", "fix-mutated-pa"), }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - Object: rev("foo", "fix-mutated-pa", "fix-mutated-pa", + Object: rev("foo", "fix-mutated-pa", WithLogURL, AllUnknownConditions, // When our reconciliation has to change the service // we should see the following mutations to status. @@ -355,7 +354,7 @@ func TestReconcile(t *testing.T) { Name: "mutated pa gets error during the fix", // Same as above, but will fail during the update. Objects: []runtime.Object{ - rev("foo", "fix-mutated-pa-fail", "fix-mutated-pa-fail", + rev("foo", "fix-mutated-pa-fail", withK8sServiceName("some-old-stuff"), WithLogURL, AllUnknownConditions, WithContainerStatuses([]v1.ContainerStatuses{{Name: "fix-mutated-pa-fail"}})), pa("foo", "fix-mutated-pa-fail", WithProtocolType(networking.ProtocolH2C), WithReachability(asv1a1.ReachabilityUnknown)), @@ -381,14 +380,14 @@ func TestReconcile(t *testing.T) { // condition. It then verifies that Reconcile propagates this into the // status of the Revision. Objects: []runtime.Object{ - rev("foo", "deploy-timeout", "deploy-timeout", + rev("foo", "deploy-timeout", withK8sServiceName("the-taxman"), WithLogURL, MarkActive), pa("foo", "deploy-timeout"), // pa can't be ready since deployment times out. 
timeoutDeploy(deploy(t, "foo", "deploy-timeout"), "I timed out!"), image("foo", "deploy-timeout"), }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - Object: rev("foo", "deploy-timeout", "deploy-timeout", + Object: rev("foo", "deploy-timeout", WithLogURL, AllUnknownConditions, // When the revision is reconciled after a Deployment has // timed out, we should see it marked with the PDE state. @@ -402,14 +401,14 @@ func TestReconcile(t *testing.T) { // but changes the user deployment to have a FailedCreate condition. // It then verifies that Reconcile propagates this into the status of the Revision. Objects: []runtime.Object{ - rev("foo", "deploy-replica-failure", "deploy-replica-failure", + rev("foo", "deploy-replica-failure", withK8sServiceName("the-taxman"), WithLogURL, MarkActive), pa("foo", "deploy-replica-failure"), replicaFailureDeploy(deploy(t, "foo", "deploy-replica-failure"), "I replica failed!"), image("foo", "deploy-replica-failure"), }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - Object: rev("foo", "deploy-replica-failure", "deploy-replica-failure", + Object: rev("foo", "deploy-replica-failure", WithLogURL, AllUnknownConditions, // When the revision is reconciled after a Deployment has // timed out, we should see it marked with the FailedCreate state. @@ -421,7 +420,7 @@ func TestReconcile(t *testing.T) { Name: "surface ImagePullBackoff", // Test the propagation of ImagePullBackoff from user container. Objects: []runtime.Object{ - rev("foo", "pull-backoff", "pull-backoff", + rev("foo", "pull-backoff", withK8sServiceName("the-taxman"), WithLogURL, MarkActivating("Deploying", ""), WithContainerStatuses([]v1.ContainerStatuses{{Name: "pull-backoff"}})), pa("foo", "pull-backoff", WithReachability(asv1a1.ReachabilityUnknown)), // pa can't be ready since deployment times out. 
pod(t, "foo", "pull-backoff", WithWaitingContainer("pull-backoff", "ImagePullBackoff", "can't pull it")), @@ -429,7 +428,7 @@ func TestReconcile(t *testing.T) { image("foo", "pull-backoff"), }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - Object: rev("foo", "pull-backoff", "pull-backoff", + Object: rev("foo", "pull-backoff", WithLogURL, AllUnknownConditions, MarkResourcesUnavailable("ImagePullBackoff", "can't pull it"), WithContainerStatuses([]v1.ContainerStatuses{{Name: "pull-backoff"}})), }}, @@ -444,7 +443,7 @@ func TestReconcile(t *testing.T) { // but changes the user deployment to have a failing pod. It then verifies // that Reconcile propagates this into the status of the Revision. Objects: []runtime.Object{ - rev("foo", "pod-error", "pod-error", + rev("foo", "pod-error", withK8sServiceName("a-pod-error"), WithLogURL, AllUnknownConditions, MarkActive), pa("foo", "pod-error"), // PA can't be ready, since no traffic. pod(t, "foo", "pod-error", WithFailingContainer("pod-error", 5, "I failed man!")), @@ -452,7 +451,7 @@ func TestReconcile(t *testing.T) { image("foo", "pod-error"), }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - Object: rev("foo", "pod-error", "pod-error", + Object: rev("foo", "pod-error", WithLogURL, AllUnknownConditions, MarkContainerExiting(5, v1.RevisionContainerExitingMessage("I failed man!")), WithContainerStatuses([]v1.ContainerStatuses{{Name: "pod-error"}})), }}, @@ -463,7 +462,7 @@ func TestReconcile(t *testing.T) { // This initializes the world to unschedule pod. It then verifies // that Reconcile propagates this into the status of the Revision. Objects: []runtime.Object{ - rev("foo", "pod-schedule-error", "pod-schedule-error", + rev("foo", "pod-schedule-error", withK8sServiceName("a-pod-schedule-error"), WithLogURL, AllUnknownConditions, MarkActive), pa("foo", "pod-schedule-error"), // PA can't be ready, since no traffic. 
pod(t, "foo", "pod-schedule-error", WithUnschedulableContainer("Insufficient energy", "Unschedulable")), @@ -471,7 +470,7 @@ func TestReconcile(t *testing.T) { image("foo", "pod-schedule-error"), }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - Object: rev("foo", "pod-schedule-error", "pod-schedule-error", + Object: rev("foo", "pod-schedule-error", WithLogURL, AllUnknownConditions, MarkResourcesUnavailable("Insufficient energy", "Unschedulable"), WithContainerStatuses([]v1.ContainerStatuses{{Name: "pod-schedule-error"}})), }}, @@ -484,13 +483,13 @@ func TestReconcile(t *testing.T) { // Revision. It then creates an Endpoints resource with active subsets. // This signal should make our Reconcile mark the Revision as Ready. Objects: []runtime.Object{ - rev("foo", "steady-ready", "steady-ready", withK8sServiceName("very-steady"), WithLogURL), + rev("foo", "steady-ready", withK8sServiceName("very-steady"), WithLogURL), pa("foo", "steady-ready", WithTraffic, WithPAStatusService("steadier-even")), deploy(t, "foo", "steady-ready"), image("foo", "steady-ready"), }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - Object: rev("foo", "steady-ready", "steady-ready", withK8sServiceName("steadier-even"), WithLogURL, + Object: rev("foo", "steady-ready", withK8sServiceName("steadier-even"), WithLogURL, // All resources are ready to go, we should see the revision being // marked ready MarkRevisionReady, WithContainerStatuses([]v1.ContainerStatuses{{Name: "steady-ready"}})), @@ -503,14 +502,14 @@ func TestReconcile(t *testing.T) { Name: "lost pa owner ref", WantErr: true, Objects: []runtime.Object{ - rev("foo", "missing-owners", "missing-owners", withK8sServiceName("lesser-revision"), WithLogURL, + rev("foo", "missing-owners", withK8sServiceName("lesser-revision"), WithLogURL, MarkRevisionReady), pa("foo", "missing-owners", WithTraffic, WithPodAutoscalerOwnersRemoved), deploy(t, "foo", "missing-owners"), image("foo", "missing-owners"), }, WantStatusUpdates: 
[]clientgotesting.UpdateActionImpl{{ - Object: rev("foo", "missing-owners", "missing-owners", withK8sServiceName("lesser-revision"), WithLogURL, + Object: rev("foo", "missing-owners", withK8sServiceName("lesser-revision"), WithLogURL, MarkRevisionReady, // When we're missing the OwnerRef for PodAutoscaler we see this update. MarkResourceNotOwned("PodAutoscaler", "missing-owners"), WithContainerStatuses([]v1.ContainerStatuses{{Name: "missing-owners"}})), @@ -523,14 +522,14 @@ func TestReconcile(t *testing.T) { Name: "lost deployment owner ref", WantErr: true, Objects: []runtime.Object{ - rev("foo", "missing-owners", "missing-owners", withK8sServiceName("youre-gonna-lose"), WithLogURL, + rev("foo", "missing-owners", withK8sServiceName("youre-gonna-lose"), WithLogURL, MarkRevisionReady, WithContainerStatuses([]v1.ContainerStatuses{{Name: "missing-owners"}})), pa("foo", "missing-owners", WithTraffic), noOwner(deploy(t, "foo", "missing-owners")), image("foo", "missing-owners"), }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - Object: rev("foo", "missing-owners", "missing-owners", withK8sServiceName("youre-gonna-lose"), WithLogURL, + Object: rev("foo", "missing-owners", withK8sServiceName("youre-gonna-lose"), WithLogURL, MarkRevisionReady, // When we're missing the OwnerRef for Deployment we see this update. 
MarkResourceNotOwned("Deployment", "missing-owners-deployment"), WithContainerStatuses([]v1.ContainerStatuses{{Name: "missing-owners"}})), @@ -543,7 +542,7 @@ func TestReconcile(t *testing.T) { Name: "image pull secrets", // This test case tests that the image pull secrets from revision propagate to deployment and image Objects: []runtime.Object{ - rev("foo", "image-pull-secrets", "image-pull-secrets", WithImagePullSecrets("foo-secret")), + rev("foo", "image-pull-secrets", WithImagePullSecrets("foo-secret")), }, WantCreates: []runtime.Object{ pa("foo", "image-pull-secrets"), @@ -551,7 +550,7 @@ func TestReconcile(t *testing.T) { imagePullSecrets(image("foo", "image-pull-secrets"), "foo-secret"), }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - Object: rev("foo", "image-pull-secrets", "image-pull-secrets", + Object: rev("foo", "image-pull-secrets", WithImagePullSecrets("foo-secret"), WithLogURL, AllUnknownConditions, MarkDeploying("Deploying"), WithContainerStatuses([]v1.ContainerStatuses{{Name: "image-pull-secrets"}})), }}, @@ -632,7 +631,7 @@ func changeContainers(deploy *appsv1.Deployment) *appsv1.Deployment { return deploy } -func rev(namespace, name, containerName string, ro ...RevisionOption) *v1.Revision { +func rev(namespace, name string, ro ...RevisionOption) *v1.Revision { r := &v1.Revision{ ObjectMeta: metav1.ObjectMeta{ Name: name, @@ -642,7 +641,7 @@ func rev(namespace, name, containerName string, ro ...RevisionOption) *v1.Revisi Spec: v1.RevisionSpec{ PodSpec: corev1.PodSpec{ Containers: []corev1.Container{{ - Name: containerName, + Name: name, Image: "busybox", }}, }, @@ -681,7 +680,7 @@ func deploy(t *testing.T, namespace, name string, opts ...interface{}) *appsv1.D } } - rev := rev(namespace, name, name) + rev := rev(namespace, name) for _, opt := range opts { if revOpt, ok := opt.(RevisionOption); ok { @@ -695,8 +694,6 @@ func deploy(t *testing.T, namespace, name string, opts ...interface{}) *appsv1.D deployment, err := 
resources.MakeDeployment(rev, cfg.Logging, cfg.Tracing, cfg.Network, cfg.Observability, cfg.Autoscaler, cfg.Deployment, ) - - fmt.Println("errerr", err) if err != nil { t.Fatal("failed to create deployment") } @@ -709,11 +706,11 @@ func image(namespace, name string, co ...configOption) *caching.Image { opt(config) } - return resources.MakeImageCache(rev(namespace, name, name), name, "") + return resources.MakeImageCache(rev(namespace, name), name, "") } func pa(namespace, name string, ko ...PodAutoscalerOption) *asv1a1.PodAutoscaler { - rev := rev(namespace, name, name) + rev := rev(namespace, name) k := resources.MakePA(rev) for _, opt := range ko { From 88df5eb8342724c9cb0f8167f76527070c647277 Mon Sep 17 00:00:00 2001 From: savitaashture Date: Mon, 27 Apr 2020 19:39:30 +0530 Subject: [PATCH 4/5] Add comments for clear information --- pkg/reconciler/revision/reconcile_resources.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pkg/reconciler/revision/reconcile_resources.go b/pkg/reconciler/revision/reconcile_resources.go index 28afea222381..48065c5b1c42 100644 --- a/pkg/reconciler/revision/reconcile_resources.go +++ b/pkg/reconciler/revision/reconcile_resources.go @@ -114,6 +114,8 @@ func (c *Reconciler) reconcileImageCache(ctx context.Context, rev *v1.Revision) logger := logging.FromContext(ctx) ns := rev.Namespace + // Revisions are immutable. + // Updating the image produces a new Revision, so stale image caches cannot leak. 
for _, container := range rev.Status.ContainerStatuses { imageName := kmeta.ChildName(resourcenames.ImageCache(rev), "-"+container.Name) if _, err := c.imageLister.Images(ns).Get(imageName); apierrs.IsNotFound(err) { From a75a25476aa8fadff814b1f6e7ceea40c93833a9 Mon Sep 17 00:00:00 2001 From: savitaashture Date: Tue, 28 Apr 2020 00:37:17 +0530 Subject: [PATCH 5/5] Remove using the image value when there are no digests --- pkg/reconciler/revision/resources/imagecache.go | 9 --------- pkg/reconciler/revision/resources/imagecache_test.go | 12 +++++++++--- 2 files changed, 9 insertions(+), 12 deletions(-) diff --git a/pkg/reconciler/revision/resources/imagecache.go b/pkg/reconciler/revision/resources/imagecache.go index 7ba8f7e5b022..1355705f304a 100644 --- a/pkg/reconciler/revision/resources/imagecache.go +++ b/pkg/reconciler/revision/resources/imagecache.go @@ -28,15 +28,6 @@ import ( // MakeImageCache makes an caching.Image resources from a revision. func MakeImageCache(rev *v1.Revision, containerName, image string) *caching.Image { - // If the given image is empty then replace that with the image from spec.Container[*].Image. 
- if image == "" { - for _, v := range rev.Spec.Containers { - if v.Name == containerName { - image = v.Image - } - } - } - img := &caching.Image{ ObjectMeta: metav1.ObjectMeta{ Name: kmeta.ChildName(names.ImageCache(rev), "-"+containerName), diff --git a/pkg/reconciler/revision/resources/imagecache_test.go b/pkg/reconciler/revision/resources/imagecache_test.go index 90fd957468f0..4b1ce2265062 100644 --- a/pkg/reconciler/revision/resources/imagecache_test.go +++ b/pkg/reconciler/revision/resources/imagecache_test.go @@ -209,7 +209,7 @@ func TestMakeImageCache(t *testing.T) { }}, }, Spec: caching.ImageSpec{ - Image: "ubuntu", + Image: "", }, }, }, { @@ -230,9 +230,15 @@ func TestMakeImageCache(t *testing.T) { }}, }, }, + Status: v1.RevisionStatus{ + ContainerStatuses: []v1.ContainerStatuses{{ + Name: "user-container", + ImageDigest: "busybox@sha256:deadbeef", + }}, + }, }, containerName: "user-container", - image: "", + image: "busybox@sha256:deadbeef", want: &caching.Image{ ObjectMeta: metav1.ObjectMeta{ Namespace: "foo", @@ -253,7 +259,7 @@ func TestMakeImageCache(t *testing.T) { }}, }, Spec: caching.ImageSpec{ - Image: "busybox", + Image: "busybox@sha256:deadbeef", ServiceAccountName: "privilegeless", }, },