diff --git a/pkg/apis/serving/v1/revision_helpers.go b/pkg/apis/serving/v1/revision_helpers.go index 3edb3ade1aa7..e3dd2e6b62b3 100644 --- a/pkg/apis/serving/v1/revision_helpers.go +++ b/pkg/apis/serving/v1/revision_helpers.go @@ -69,11 +69,19 @@ func (e LastPinnedParseError) Error() string { } // GetContainer returns a pointer to the relevant corev1.Container field. -// It is never nil and should be exactly the specified container as guaranteed -// by validation. +// It is never nil and should be exactly the specified container if len(containers) == 1 or +// if there are multiple containers it returns the container which has Ports +// as guaranteed by validation. func (rs *RevisionSpec) GetContainer() *corev1.Container { - if len(rs.Containers) > 0 { + switch { + case len(rs.Containers) == 1: return &rs.Containers[0] + case len(rs.Containers) > 1: + for i := range rs.Containers { + if len(rs.Containers[i].Ports) != 0 { + return &rs.Containers[i] + } + } } // Should be unreachable post-validation, but here to ease testing. 
return &corev1.Container{} diff --git a/pkg/apis/serving/v1/revision_lifecycle_test.go b/pkg/apis/serving/v1/revision_lifecycle_test.go index 06ba0c17a9b7..f2fcd0ce48b1 100644 --- a/pkg/apis/serving/v1/revision_lifecycle_test.go +++ b/pkg/apis/serving/v1/revision_lifecycle_test.go @@ -765,12 +765,15 @@ func TestGetContainer(t *testing.T) { Image: "foo", }, }, { - name: "get first container info even after passing multiple", + name: "get serving container info even if there are multiple containers", status: RevisionSpec{ PodSpec: corev1.PodSpec{ Containers: []corev1.Container{{ Name: "firstContainer", Image: "firstImage", + Ports: []corev1.ContainerPort{{ + ContainerPort: 8888, + }}, }, { Name: "secondContainer", Image: "secondImage", @@ -780,7 +783,24 @@ func TestGetContainer(t *testing.T) { want: &corev1.Container{ Name: "firstContainer", Image: "firstImage", + Ports: []corev1.ContainerPort{{ + ContainerPort: 8888, + }}, + }, + }, { + name: "get empty container when passed multiple containers without the container port", + status: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "firstContainer", + Image: "firstImage", + }, { + Name: "secondContainer", + Image: "secondImage", + }}, + }, }, + want: &corev1.Container{}, }} for _, tc := range cases { t.Run(tc.name, func(t *testing.T) { diff --git a/pkg/apis/serving/v1alpha1/revision_lifecycle.go b/pkg/apis/serving/v1alpha1/revision_lifecycle.go index c9437423a3d2..46de3402d14a 100644 --- a/pkg/apis/serving/v1alpha1/revision_lifecycle.go +++ b/pkg/apis/serving/v1alpha1/revision_lifecycle.go @@ -70,14 +70,22 @@ func (r *Revision) GetGroupVersionKind() schema.GroupVersionKind { } // GetContainer returns a pointer to the relevant corev1.Container field. -// It is never nil and should be exactly the specified container as guaranteed -// by validation. 
+// It is never nil and should be exactly the specified container if len(containers) == 1 or +// if there are multiple containers it returns the container which has Ports +// as guaranteed by validation. func (rs *RevisionSpec) GetContainer() *corev1.Container { if rs.DeprecatedContainer != nil { return rs.DeprecatedContainer } - if len(rs.Containers) > 0 { + switch { + case len(rs.Containers) == 1: return &rs.Containers[0] + case len(rs.Containers) > 1: + for i := range rs.Containers { + if len(rs.Containers[i].Ports) != 0 { + return &rs.Containers[i] + } + } } // Should be unreachable post-validation, but is here to ease testing. return &corev1.Container{} diff --git a/pkg/apis/serving/v1alpha1/revision_lifecycle_test.go b/pkg/apis/serving/v1alpha1/revision_lifecycle_test.go index 06bed42c766c..d42db94a4e5b 100644 --- a/pkg/apis/serving/v1alpha1/revision_lifecycle_test.go +++ b/pkg/apis/serving/v1alpha1/revision_lifecycle_test.go @@ -759,13 +759,32 @@ func TestGetContainer(t *testing.T) { Image: "foo", }, }, { - name: "get first container info even after passing multiple", + name: "get container info", + status: RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "servingContainer", + Image: "firstImage", + }}, + }, + }, + }, + want: &corev1.Container{ + Name: "servingContainer", + Image: "firstImage", + }, + }, { + name: "get serving container info even if there are multiple containers", status: RevisionSpec{ RevisionSpec: v1.RevisionSpec{ PodSpec: corev1.PodSpec{ Containers: []corev1.Container{{ Name: "firstContainer", Image: "firstImage", + Ports: []corev1.ContainerPort{{ + ContainerPort: 8888, + }}, }, { Name: "secondContainer", Image: "secondImage", @@ -776,7 +795,26 @@ func TestGetContainer(t *testing.T) { want: &corev1.Container{ Name: "firstContainer", Image: "firstImage", + Ports: []corev1.ContainerPort{{ + ContainerPort: 8888, + }}, + }, + }, { + name: "get empty container when passed 
multiple containers without the container port", + status: RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "firstContainer", + Image: "firstImage", + }, { + Name: "secondContainer", + Image: "secondImage", + }}, + }, + }, }, + want: &corev1.Container{}, }} for _, tc := range cases { t.Run(tc.name, func(t *testing.T) { diff --git a/pkg/reconciler/revision/resources/deploy.go b/pkg/reconciler/revision/resources/deploy.go index fe4607931814..2ea2c26033e3 100644 --- a/pkg/reconciler/revision/resources/deploy.go +++ b/pkg/reconciler/revision/resources/deploy.go @@ -126,8 +126,7 @@ func makePodSpec(rev *v1.Revision, loggingConfig *logging.Config, tracingConfig return nil, fmt.Errorf("failed to create queue-proxy container: %w", err) } - userContainer := BuildUserContainer(rev) - podSpec := BuildPodSpec(rev, []corev1.Container{*userContainer, *queueContainer}) + podSpec := BuildPodSpec(rev, append(BuildUserContainers(rev), *queueContainer)) if autoscalerConfig.EnableGracefulScaledown { podSpec.Volumes = append(podSpec.Volumes, labelVolume) @@ -136,49 +135,65 @@ func makePodSpec(rev *v1.Revision, loggingConfig *logging.Config, tracingConfig return podSpec, nil } -// BuildUserContainer makes a container from the Revision template. -func BuildUserContainer(rev *v1.Revision) *corev1.Container { - userContainer := rev.Spec.GetContainer().DeepCopy() +// BuildUserContainers makes an array of containers from the Revision template. 
+func BuildUserContainers(rev *v1.Revision) []corev1.Container { + containers := make([]corev1.Container, 0, len(rev.Spec.PodSpec.Containers)) + for i := range rev.Spec.PodSpec.Containers { + var container corev1.Container + if len(rev.Spec.PodSpec.Containers[i].Ports) != 0 || len(rev.Spec.PodSpec.Containers) == 1 { + container = makeServingContainer(*rev.Spec.PodSpec.Containers[i].DeepCopy(), rev) + } else { + container = makeContainer(*rev.Spec.PodSpec.Containers[i].DeepCopy(), rev) + } + // The below logic is safe because the image digests in Status.ContainerStatus will have been resolved + // before this method is called. We check for an empty array here because the method can also be + // called during DryRun, where ContainerStatuses will not yet have been resolved. + if len(rev.Status.ContainerStatuses) != 0 { + if rev.Status.ContainerStatuses[i].ImageDigest != "" { + container.Image = rev.Status.ContainerStatuses[i].ImageDigest + } + } + containers = append(containers, container) + } + return containers +} + +func makeContainer(container corev1.Container, rev *v1.Revision) corev1.Container { // Adding or removing an overwritten corev1.Container field here? Don't forget to // update the fieldmasks / validations in pkg/apis/serving varLogMount := varLogVolumeMount.DeepCopy() - varLogMount.SubPathExpr += userContainer.Name - - userContainer.VolumeMounts = append(userContainer.VolumeMounts, *varLogMount) - userContainer.Lifecycle = userLifecycle - userPort := getUserPort(rev) - userPortInt := int(userPort) - userPortStr := strconv.Itoa(userPortInt) - // Replacement is safe as only up to a single port is allowed on the Revision - userContainer.Ports = buildContainerPorts(userPort) - userContainer.Env = append(userContainer.Env, buildUserPortEnv(userPortStr)) - userContainer.Env = append(userContainer.Env, getKnativeEnvVar(rev)...) - userContainer.Env = append(userContainer.Env, buildVarLogSubpathEnvs()...) 
+ varLogMount.SubPathExpr += container.Name + container.VolumeMounts = append(container.VolumeMounts, *varLogMount) + container.Lifecycle = userLifecycle + container.Env = append(container.Env, getKnativeEnvVar(rev)...) + container.Env = append(container.Env, buildVarLogSubpathEnvs()...) // Explicitly disable stdin and tty allocation - userContainer.Stdin = false - userContainer.TTY = false - - // Prefer imageDigest from revision if available - if rev.Status.DeprecatedImageDigest != "" { - userContainer.Image = rev.Status.DeprecatedImageDigest - } - - if userContainer.TerminationMessagePolicy == "" { - userContainer.TerminationMessagePolicy = corev1.TerminationMessageFallbackToLogsOnError + container.Stdin = false + container.TTY = false + if container.TerminationMessagePolicy == "" { + container.TerminationMessagePolicy = corev1.TerminationMessageFallbackToLogsOnError } + return container +} - if userContainer.ReadinessProbe != nil { - if userContainer.ReadinessProbe.HTTPGet != nil || userContainer.ReadinessProbe.TCPSocket != nil { +func makeServingContainer(servingContainer corev1.Container, rev *v1.Revision) corev1.Container { + userPort := getUserPort(rev) + userPortStr := strconv.Itoa(int(userPort)) + // Replacement is safe as only up to a single port is allowed on the Revision + servingContainer.Ports = buildContainerPorts(userPort) + servingContainer.Env = append(servingContainer.Env, buildUserPortEnv(userPortStr)) + container := makeContainer(servingContainer, rev) + if container.ReadinessProbe != nil { + if container.ReadinessProbe.HTTPGet != nil || container.ReadinessProbe.TCPSocket != nil { // HTTP and TCP ReadinessProbes are executed by the queue-proxy directly against the // user-container instead of via kubelet. - userContainer.ReadinessProbe = nil + container.ReadinessProbe = nil } } - // If the client provides probes, we should fill in the port for them. 
- rewriteUserProbe(userContainer.LivenessProbe, userPortInt) - return userContainer + rewriteUserProbe(container.LivenessProbe, int(userPort)) + return container } // BuildPodSpec creates a PodSpec from the given revision and containers. diff --git a/pkg/reconciler/revision/resources/deploy_test.go b/pkg/reconciler/revision/resources/deploy_test.go index 6a97ef2be742..aa007881f300 100644 --- a/pkg/reconciler/revision/resources/deploy_test.go +++ b/pkg/reconciler/revision/resources/deploy_test.go @@ -40,19 +40,23 @@ import ( autoscalerconfig "knative.dev/serving/pkg/autoscaler/config" "knative.dev/serving/pkg/deployment" "knative.dev/serving/pkg/network" + + . "knative.dev/serving/pkg/testing/v1" ) var ( - containerName = "my-container-name" + servingContainerName = "serving-container" + sidecarContainerName = "sidecar-container-1" + sidecarContainerName2 = "sidecar-container-2" sidecarIstioInjectAnnotation = "sidecar.istio.io/inject" - defaultUserContainer = &corev1.Container{ - Name: containerName, + defaultServingContainer = &corev1.Container{ + Name: servingContainerName, Image: "busybox", Ports: buildContainerPorts(v1.DefaultUserPort), VolumeMounts: []corev1.VolumeMount{{ Name: varLogVolume.Name, MountPath: "/var/log", - SubPathExpr: "$(K_INTERNAL_POD_NAMESPACE)_$(K_INTERNAL_POD_NAME)_my-container-name", + SubPathExpr: "$(K_INTERNAL_POD_NAMESPACE)_$(K_INTERNAL_POD_NAME)_" + servingContainerName, }}, Lifecycle: userLifecycle, TerminationMessagePolicy: corev1.TerminationMessageFallbackToLogsOnError, @@ -217,22 +221,48 @@ var ( }, }, } +) + +func defaultSidecarContainer(containerName string) *corev1.Container { + return &corev1.Container{ + Name: containerName, + Image: "ubuntu", + VolumeMounts: []corev1.VolumeMount{{ + Name: varLogVolume.Name, + MountPath: "/var/log", + SubPathExpr: "$(K_INTERNAL_POD_NAMESPACE)_$(K_INTERNAL_POD_NAME)_" + containerName, + }}, + Lifecycle: userLifecycle, + TerminationMessagePolicy: corev1.TerminationMessageFallbackToLogsOnError, + 
Stdin: false, + TTY: false, + Env: []corev1.EnvVar{{ + Name: "K_REVISION", + Value: "bar", + }, { + Name: "K_CONFIGURATION", + }, { + Name: "K_SERVICE", + }, { + Name: "K_INTERNAL_POD_NAME", + ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.name"}}, + }, { + Name: "K_INTERNAL_POD_NAMESPACE", + ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.namespace"}}, + }}, + } +} - defaultRevision = &v1.Revision{ +func defaultRevision() *v1.Revision { + return &v1.Revision{ ObjectMeta: metav1.ObjectMeta{ UID: "1234", }, Spec: v1.RevisionSpec{ TimeoutSeconds: ptr.Int64(45), - PodSpec: corev1.PodSpec{ - Containers: []corev1.Container{{ - Name: containerName, - Image: "busybox", - }}, - }, }, } -) +} func refInt64(num int64) *int64 { return &num @@ -241,7 +271,6 @@ func refInt64(num int64) *int64 { type containerOption func(*corev1.Container) type podSpecOption func(*corev1.PodSpec) type deploymentOption func(*appsv1.Deployment) -type revisionOption func(*v1.Revision) func container(container *corev1.Container, opts ...containerOption) corev1.Container { for _, option := range opts { @@ -250,8 +279,12 @@ func container(container *corev1.Container, opts ...containerOption) corev1.Cont return *container } -func userContainer(opts ...containerOption) corev1.Container { - return container(defaultUserContainer.DeepCopy(), opts...) +func servingContainer(opts ...containerOption) corev1.Container { + return container(defaultServingContainer.DeepCopy(), opts...) +} + +func sidecarContainer(containerName string, opts ...containerOption) corev1.Container { + return container(defaultSidecarContainer(containerName).DeepCopy(), opts...) 
} func queueContainer(opts ...containerOption) corev1.Container { @@ -280,36 +313,32 @@ func withPodLabelsVolumeMount() containerOption { } } -func withReadinessProbe(handler corev1.Handler) containerOption { - return func(container *corev1.Container) { - container.ReadinessProbe = &corev1.Probe{Handler: handler} - } +func withTCPReadinessProbe(port int) *corev1.Probe { + return &corev1.Probe{ + Handler: corev1.Handler{ + TCPSocket: &corev1.TCPSocketAction{ + Host: "127.0.0.1", + Port: intstr.FromInt(port), + }}} } -func withTCPReadinessProbe() containerOption { - return withReadinessProbe(corev1.Handler{ - TCPSocket: &corev1.TCPSocketAction{ - Host: "127.0.0.1", - Port: intstr.FromInt(v1.DefaultUserPort), - }, - }) -} - -func withHTTPReadinessProbe(port int) containerOption { - return withReadinessProbe(corev1.Handler{ - HTTPGet: &corev1.HTTPGetAction{ - Port: intstr.FromInt(port), - Path: "/", - }, - }) +func withHTTPReadinessProbe(port int) *corev1.Probe { + return &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Port: intstr.FromInt(port), + Path: "/", + }, + }} } -func withExecReadinessProbe(command []string) containerOption { - return withReadinessProbe(corev1.Handler{ - Exec: &corev1.ExecAction{ - Command: command, - }, - }) +func withExecReadinessProbe(command []string) *corev1.Probe { + return &corev1.Probe{ + Handler: corev1.Handler{ + Exec: &corev1.ExecAction{ + Command: command, + }, + }} } func withLivenessProbe(handler corev1.Handler) containerOption { @@ -349,8 +378,8 @@ func appsv1deployment(opts ...deploymentOption) *appsv1.Deployment { return deploy } -func revision(name, ns string, opts ...revisionOption) *v1.Revision { - revision := defaultRevision.DeepCopy() +func revision(name, ns string, opts ...RevisionOption) *v1.Revision { + revision := defaultRevision() revision.ObjectMeta.Name = name revision.ObjectMeta.Namespace = ns for _, option := range opts { @@ -359,7 +388,7 @@ func revision(name, ns string, opts 
...revisionOption) *v1.Revision { return revision } -func withContainerConcurrency(cc int64) revisionOption { +func withContainerConcurrency(cc int64) RevisionOption { return func(revision *v1.Revision) { revision.Spec.ContainerConcurrency = &cc } @@ -369,7 +398,7 @@ func withoutLabels(revision *v1.Revision) { revision.ObjectMeta.Labels = map[string]string{} } -func withOwnerReference(name string) revisionOption { +func withOwnerReference(name string) RevisionOption { return func(revision *v1.Revision) { revision.ObjectMeta.OwnerReferences = []metav1.OwnerReference{{ APIVersion: v1.SchemeGroupVersion.String(), @@ -381,6 +410,68 @@ func withOwnerReference(name string) revisionOption { } } +// withContainers function helps to provide a podSpec to the revision for both single and multiple containers +func withContainers(containers []corev1.Container) RevisionOption { + return func(revision *v1.Revision) { + revision.Spec = v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: containers, + }, + TimeoutSeconds: ptr.Int64(45), + } + } +} + +func getContainer() containerOption { + return func(container *corev1.Container) { + container.Command = []string{"/bin/bash"} + container.Args = []string{"-c", "echo Hello world"} + container.Env = append([]corev1.EnvVar{{ + Name: "FOO", + Value: "bar", + }, { + Name: "BAZ", + Value: "blah", + }}, container.Env...) 
+ container.Resources = corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("666Mi"), + corev1.ResourceCPU: resource.MustParse("666m"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("888Mi"), + corev1.ResourceCPU: resource.MustParse("888m"), + }, + } + container.TerminationMessagePolicy = corev1.TerminationMessageReadFile + } +} + +func updateContainer() containerOption { + return func(container *corev1.Container) { + container.Command = []string{"/bin/bash"} + container.Args = []string{"-c", "echo Hello world"} + container.Env = append([]corev1.EnvVar{{ + Name: "FOO", + Value: "bar", + }, { + Name: "BAZ", + Value: "blah", + }}, container.Env...) + container.Resources = corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("666Mi"), + corev1.ResourceCPU: resource.MustParse("666m"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("888Mi"), + corev1.ResourceCPU: resource.MustParse("888m"), + }, + } + container.TerminationMessagePolicy = corev1.TerminationMessageReadFile + } +} + func TestMakePodSpec(t *testing.T) { tests := []struct { name string @@ -391,21 +482,25 @@ func TestMakePodSpec(t *testing.T) { }{{ name: "user-defined user port, queue proxy have PORT env", rev: revision("bar", "foo", - withContainerConcurrency(1), - func(revision *v1.Revision) { - revision.Spec.GetContainer().Ports = []corev1.ContainerPort{{ + withContainers([]corev1.Container{{ + Name: servingContainerName, + Image: "busybox", + Ports: []corev1.ContainerPort{{ ContainerPort: 8888, - }} - container(revision.Spec.GetContainer(), - withTCPReadinessProbe(), - ) - }, + }}, + ReadinessProbe: withTCPReadinessProbe(v1.DefaultUserPort), + }}), + withContainerConcurrency(1), + WithContainerStatuses([]v1.ContainerStatuses{{ + ImageDigest: "busybox@sha256:deadbeef", + }}), ), want: podSpec( []corev1.Container{ - userContainer( + 
servingContainer( func(container *corev1.Container) { container.Ports[0].ContainerPort = 8888 + container.Image = "busybox@sha256:deadbeef" }, withEnvVar("PORT", "8888"), ), @@ -413,23 +508,27 @@ func TestMakePodSpec(t *testing.T) { withEnvVar("CONTAINER_CONCURRENCY", "1"), withEnvVar("USER_PORT", "8888"), withEnvVar("SERVING_READINESS_PROBE", `{"tcpSocket":{"port":8888,"host":"127.0.0.1"}}`), - ), - }), + )}), }, { name: "volumes passed through", rev: revision("bar", "foo", - withContainerConcurrency(1), - func(revision *v1.Revision) { - revision.Spec.GetContainer().Ports = []corev1.ContainerPort{{ + withContainers([]corev1.Container{{ + Name: servingContainerName, + Image: "busybox", + Ports: []corev1.ContainerPort{{ ContainerPort: 8888, - }} - revision.Spec.GetContainer().VolumeMounts = []corev1.VolumeMount{{ + }}, + VolumeMounts: []corev1.VolumeMount{{ Name: "asdf", MountPath: "/asdf", - }} - container(revision.Spec.GetContainer(), - withTCPReadinessProbe(), - ) + }}, + ReadinessProbe: withTCPReadinessProbe(v1.DefaultUserPort), + }}), + withContainerConcurrency(1), + WithContainerStatuses([]v1.ContainerStatuses{{ + ImageDigest: "busybox@sha256:deadbeef", + }}), + func(revision *v1.Revision) { revision.Spec.Volumes = []corev1.Volume{{ Name: "asdf", VolumeSource: corev1.VolumeSource{ @@ -442,9 +541,10 @@ func TestMakePodSpec(t *testing.T) { ), want: podSpec( []corev1.Container{ - userContainer( + servingContainer( func(container *corev1.Container) { container.Ports[0].ContainerPort = 8888 + container.Image = "busybox@sha256:deadbeef" }, withEnvVar("PORT", "8888"), withPrependedVolumeMounts(corev1.VolumeMount{ @@ -468,16 +568,21 @@ func TestMakePodSpec(t *testing.T) { }, { name: "concurrency=1 no owner", rev: revision("bar", "foo", + withContainers([]corev1.Container{{ + Name: servingContainerName, + Image: "busybox", + ReadinessProbe: withTCPReadinessProbe(v1.DefaultUserPort), + }}), withContainerConcurrency(1), - func(revision *v1.Revision) { - 
container(revision.Spec.GetContainer(), - withTCPReadinessProbe(), - ) - }, + WithContainerStatuses([]v1.ContainerStatuses{{ + ImageDigest: "busybox@sha256:deadbeef", + }}), ), want: podSpec( []corev1.Container{ - userContainer(), + servingContainer(func(container *corev1.Container) { + container.Image = "busybox@sha256:deadbeef" + }), queueContainer( withEnvVar("CONTAINER_CONCURRENCY", "1"), ), @@ -485,19 +590,19 @@ func TestMakePodSpec(t *testing.T) { }, { name: "concurrency=1 no owner digest resolved", rev: revision("bar", "foo", + withContainers([]corev1.Container{{ + Name: servingContainerName, + Image: "busybox", + ReadinessProbe: withTCPReadinessProbe(v1.DefaultUserPort), + }}), withContainerConcurrency(1), - func(revision *v1.Revision) { - revision.Status = v1.RevisionStatus{ - DeprecatedImageDigest: "busybox@sha256:deadbeef", - } - container(revision.Spec.GetContainer(), - withTCPReadinessProbe(), - ) - }, + WithContainerStatuses([]v1.ContainerStatuses{{ + ImageDigest: "busybox@sha256:deadbeef", + }}), ), want: podSpec( []corev1.Container{ - userContainer(func(container *corev1.Container) { + servingContainer(func(container *corev1.Container) { container.Image = "busybox@sha256:deadbeef" }), queueContainer( @@ -507,17 +612,22 @@ func TestMakePodSpec(t *testing.T) { }, { name: "concurrency=1 with owner", rev: revision("bar", "foo", + withContainers([]corev1.Container{{ + Name: servingContainerName, + Image: "busybox", + ReadinessProbe: withTCPReadinessProbe(v1.DefaultUserPort), + }}), withContainerConcurrency(1), withOwnerReference("parent-config"), - func(revision *v1.Revision) { - container(revision.Spec.GetContainer(), - withTCPReadinessProbe(), - ) - }, + WithContainerStatuses([]v1.ContainerStatuses{{ + ImageDigest: "busybox@sha256:deadbeef", + }}), ), want: podSpec( []corev1.Container{ - userContainer(), + servingContainer(func(container *corev1.Container) { + container.Image = "busybox@sha256:deadbeef" + }), queueContainer( 
withEnvVar("SERVING_CONFIGURATION", "parent-config"), withEnvVar("CONTAINER_CONCURRENCY", "1"), @@ -525,14 +635,21 @@ func TestMakePodSpec(t *testing.T) { }), }, { name: "with http readiness probe", - rev: revision("bar", "foo", func(revision *v1.Revision) { - container(revision.Spec.GetContainer(), - withHTTPReadinessProbe(v1.DefaultUserPort), - ) - }), + rev: revision("bar", "foo", + withContainers([]corev1.Container{{ + Name: servingContainerName, + Image: "busybox", + ReadinessProbe: withHTTPReadinessProbe(v1.DefaultUserPort), + }}), + WithContainerStatuses([]v1.ContainerStatuses{{ + ImageDigest: "busybox@sha256:deadbeef", + }}), + ), want: podSpec( []corev1.Container{ - userContainer(), + servingContainer(func(container *corev1.Container) { + container.Image = "busybox@sha256:deadbeef" + }), queueContainer( withEnvVar("CONTAINER_CONCURRENCY", "0"), withEnvVar("SERVING_READINESS_PROBE", `{"httpGet":{"path":"/","port":8080,"host":"127.0.0.1","scheme":"HTTP","httpHeaders":[{"name":"K-Kubelet-Probe","value":"queue"}]}}`), @@ -540,12 +657,21 @@ func TestMakePodSpec(t *testing.T) { }), }, { name: "with tcp readiness probe", - rev: revision("bar", "foo", func(revision *v1.Revision) { - container(revision.Spec.GetContainer(), withTCPReadinessProbe()) - }), + rev: revision("bar", "foo", + withContainers([]corev1.Container{{ + Name: servingContainerName, + Image: "busybox", + ReadinessProbe: withTCPReadinessProbe(12345), + }}), + WithContainerStatuses([]v1.ContainerStatuses{{ + ImageDigest: "busybox@sha256:deadbeef", + }}), + ), want: podSpec( []corev1.Container{ - userContainer(), + servingContainer(func(container *corev1.Container) { + container.Image = "busybox@sha256:deadbeef" + }), queueContainer( withEnvVar("CONTAINER_CONCURRENCY", "0"), withEnvVar("SERVING_READINESS_PROBE", `{"tcpSocket":{"port":8080,"host":"127.0.0.1"}}`), @@ -553,15 +679,23 @@ func TestMakePodSpec(t *testing.T) { }), }, { name: "with shell readiness probe", - rev: revision("bar", "foo", 
func(revision *v1.Revision) { - container(revision.Spec.GetContainer(), - withExecReadinessProbe([]string{"echo", "hello"}), - ) - }), + rev: revision("bar", "foo", + withContainers([]corev1.Container{{ + Name: servingContainerName, + Image: "busybox", + ReadinessProbe: withExecReadinessProbe([]string{"echo", "hello"}), + }}), + WithContainerStatuses([]v1.ContainerStatuses{{ + ImageDigest: "busybox@sha256:deadbeef", + }}), + ), want: podSpec( []corev1.Container{ - userContainer( - withExecReadinessProbe([]string{"echo", "hello"})), + servingContainer( + func(container *corev1.Container) { + container.Image = "busybox@sha256:deadbeef" + container.ReadinessProbe = withExecReadinessProbe([]string{"echo", "hello"}) + }), queueContainer( withEnvVar("CONTAINER_CONCURRENCY", "0"), withEnvVar("SERVING_READINESS_PROBE", `{"tcpSocket":{"port":8080,"host":"127.0.0.1"}}`), @@ -569,19 +703,29 @@ func TestMakePodSpec(t *testing.T) { }), }, { name: "with http liveness probe", - rev: revision("bar", "foo", func(revision *v1.Revision) { - container(revision.Spec.GetContainer(), - withTCPReadinessProbe(), - withLivenessProbe(corev1.Handler{ - HTTPGet: &corev1.HTTPGetAction{ - Path: "/", + rev: revision("bar", "foo", + withContainers([]corev1.Container{{ + Name: servingContainerName, + Image: "busybox", + ReadinessProbe: withTCPReadinessProbe(v1.DefaultUserPort), + LivenessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/", + }, }, - }), - ) - }), + }, + }}), + WithContainerStatuses([]v1.ContainerStatuses{{ + ImageDigest: "busybox@sha256:deadbeef", + }}), + ), want: podSpec( []corev1.Container{ - userContainer( + servingContainer( + func(container *corev1.Container) { + container.Image = "busybox@sha256:deadbeef" + }, withLivenessProbe(corev1.Handler{ HTTPGet: &corev1.HTTPGetAction{ Path: "/", @@ -599,17 +743,26 @@ func TestMakePodSpec(t *testing.T) { }), }, { name: "with tcp liveness probe", - rev: revision("bar", "foo", func(revision 
*v1.Revision) { - container(revision.Spec.GetContainer(), - withTCPReadinessProbe(), - withLivenessProbe(corev1.Handler{ - TCPSocket: &corev1.TCPSocketAction{}, - }), - ) - }), + rev: revision("bar", "foo", + withContainers([]corev1.Container{{ + Name: servingContainerName, + Image: "busybox", + ReadinessProbe: withTCPReadinessProbe(v1.DefaultUserPort), + LivenessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + TCPSocket: &corev1.TCPSocketAction{}, + }}}}, + ), + WithContainerStatuses([]v1.ContainerStatuses{{ + ImageDigest: "busybox@sha256:deadbeef", + }}), + ), want: podSpec( []corev1.Container{ - userContainer( + servingContainer( + func(container *corev1.Container) { + container.Image = "busybox@sha256:deadbeef" + }, withLivenessProbe(corev1.Handler{ TCPSocket: &corev1.TCPSocketAction{ Port: intstr.FromInt(v1.DefaultUserPort), @@ -622,17 +775,64 @@ func TestMakePodSpec(t *testing.T) { }), }, { name: "with graceful scaledown enabled", - rev: revision("bar", "foo", func(revision *v1.Revision) { - container(revision.Spec.GetContainer(), - withTCPReadinessProbe(), - ) - }), + rev: revision("bar", "foo", + withContainers([]corev1.Container{{ + Name: servingContainerName, + Image: "busybox", + ReadinessProbe: withTCPReadinessProbe(v1.DefaultUserPort), + }}), + WithContainerStatuses([]v1.ContainerStatuses{{ + ImageDigest: "busybox@sha256:deadbeef", + }}), + ), ac: autoscalerconfig.Config{ EnableGracefulScaledown: true, }, want: podSpec( []corev1.Container{ - userContainer(), + servingContainer(func(container *corev1.Container) { + container.Image = "busybox@sha256:deadbeef" + }), + queueContainer( + withEnvVar("DOWNWARD_API_LABELS_PATH", fmt.Sprintf("%s/%s", podInfoVolumePath, metadataLabelsPath)), + withPodLabelsVolumeMount(), + ), + }, + func(podSpec *corev1.PodSpec) { + podSpec.Volumes = append(podSpec.Volumes, labelVolume) + }, + ), + }, { + name: "with graceful scaledown enabled for multi container", + rev: revision("bar", "foo", + 
withContainers([]corev1.Container{{ + Name: servingContainerName, + Image: "busybox", + Ports: []corev1.ContainerPort{{ + ContainerPort: v1.DefaultUserPort, + }}, + ReadinessProbe: withTCPReadinessProbe(v1.DefaultUserPort), + }, { + Name: sidecarContainerName, + Image: "ubuntu", + }}), + WithContainerStatuses([]v1.ContainerStatuses{{ + ImageDigest: "busybox@sha256:deadbeef", + }, { + ImageDigest: "ubuntu@sha256:deadbffe", + }}), + ), + ac: autoscalerconfig.Config{ + EnableGracefulScaledown: true, + }, + want: podSpec( + []corev1.Container{ + servingContainer(func(container *corev1.Container) { + container.Image = "busybox@sha256:deadbeef" + }), + sidecarContainer(sidecarContainerName, func(container *corev1.Container) { + container.Image = "ubuntu@sha256:deadbffe" + }), queueContainer( withEnvVar("DOWNWARD_API_LABELS_PATH", fmt.Sprintf("%s/%s", podInfoVolumePath, metadataLabelsPath)), withPodLabelsVolumeMount(), @@ -645,56 +845,149 @@ func TestMakePodSpec(t *testing.T) { }, { name: "complex pod spec", rev: revision("bar", "foo", + withContainers([]corev1.Container{{ + Name: servingContainerName, + Image: "busybox", + ReadinessProbe: withTCPReadinessProbe(v1.DefaultUserPort), + }}), withContainerConcurrency(1), + WithContainerStatuses([]v1.ContainerStatuses{{ + ImageDigest: "busybox@sha256:deadbeef", + }}), func(revision *v1.Revision) { revision.ObjectMeta.Labels = map[string]string{ serving.ConfigurationLabelKey: "cfg", serving.ServiceLabelKey: "svc", } - revision.Spec.GetContainer().Command = []string{"/bin/bash"} - revision.Spec.GetContainer().Args = []string{"-c", "echo Hello world"} - container(revision.Spec.GetContainer(), - withTCPReadinessProbe(), - withEnvVar("FOO", "bar"), - withEnvVar("BAZ", "blah"), - ) - revision.Spec.GetContainer().Resources = corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - corev1.ResourceMemory: resource.MustParse("666Mi"), - corev1.ResourceCPU: resource.MustParse("666m"), + container(revision.Spec.GetContainer(), 
updateContainer()) + }, + ), + want: podSpec( + []corev1.Container{ + servingContainer( + func(container *corev1.Container) { + container.Image = "busybox@sha256:deadbeef" }, - Limits: corev1.ResourceList{ - corev1.ResourceMemory: resource.MustParse("888Mi"), - corev1.ResourceCPU: resource.MustParse("888m"), + getContainer(), + withEnvVar("K_CONFIGURATION", "cfg"), + withEnvVar("K_SERVICE", "svc"), + ), + queueContainer( + withEnvVar("CONTAINER_CONCURRENCY", "1"), + withEnvVar("SERVING_SERVICE", "svc"), + )}), + }, { + name: "complex pod spec for multiple containers with container data to all containers", + rev: revision("bar", "foo", + withContainers([]corev1.Container{{ + Name: servingContainerName, + Image: "busybox", + Ports: []corev1.ContainerPort{{ + ContainerPort: v1.DefaultUserPort, + }}, + ReadinessProbe: withTCPReadinessProbe(v1.DefaultUserPort), + }, { + Name: sidecarContainerName, + Image: "ubuntu", + }, { + Name: "sidecar-container-2", + Image: "alpine", + }}), + withContainerConcurrency(1), + WithContainerStatuses([]v1.ContainerStatuses{{ + ImageDigest: "busybox@sha256:deadbeef", + }, { + ImageDigest: "ubuntu@sha256:deadbffe", + }, { + ImageDigest: "alpine@sha256:deadbfff", + }}), + func(revision *v1.Revision) { + revision.ObjectMeta.Labels = map[string]string{ + serving.ConfigurationLabelKey: "cfg", + serving.ServiceLabelKey: "svc", + } + for i := range revision.Spec.Containers { + container(&revision.Spec.Containers[i], updateContainer()) + } + }, + ), + want: podSpec( + []corev1.Container{ + servingContainer( + func(container *corev1.Container) { + container.Image = "busybox@sha256:deadbeef" }, + getContainer(), + withEnvVar("K_CONFIGURATION", "cfg"), + withEnvVar("K_SERVICE", "svc"), + ), + sidecarContainer(sidecarContainerName, + func(container *corev1.Container) { + container.Image = "ubuntu@sha256:deadbffe" + }, + getContainer(), + withEnvVar("K_CONFIGURATION", "cfg"), + withEnvVar("K_SERVICE", "svc"), + ), + 
sidecarContainer(sidecarContainerName2, + getContainer(), + func(container *corev1.Container) { + container.Name = "sidecar-container-2" + container.Image = "alpine@sha256:deadbfff" + }, + withEnvVar("K_CONFIGURATION", "cfg"), + withEnvVar("K_SERVICE", "svc"), + ), + queueContainer( + withEnvVar("CONTAINER_CONCURRENCY", "1"), + withEnvVar("SERVING_SERVICE", "svc"), + ), + }), + }, { + name: "complex pod spec for multiple containers with container data only to serving containers", + rev: revision("bar", "foo", + withContainers([]corev1.Container{{ + Name: servingContainerName, + Image: "busybox", + Ports: []corev1.ContainerPort{{ + ContainerPort: 8888, + }}, + ReadinessProbe: withTCPReadinessProbe(v1.DefaultUserPort), + }, { + Name: sidecarContainerName, + Image: "ubuntu", + }}), + withContainerConcurrency(1), + WithContainerStatuses([]v1.ContainerStatuses{{ + ImageDigest: "busybox@sha256:deadbeef", + }, { + ImageDigest: "ubuntu@sha256:deadbffe", + }}), + func(revision *v1.Revision) { + revision.ObjectMeta.Labels = map[string]string{ + serving.ConfigurationLabelKey: "cfg", + serving.ServiceLabelKey: "svc", } - revision.Spec.GetContainer().TerminationMessagePolicy = corev1.TerminationMessageReadFile + container(revision.Spec.GetContainer(), updateContainer()) }, ), want: podSpec( []corev1.Container{ - userContainer( + servingContainer( + func(container *corev1.Container) { + container.Image = "busybox@sha256:deadbeef" + }, + getContainer(), func(container *corev1.Container) { - container.Command = []string{"/bin/bash"} - container.Args = []string{"-c", "echo Hello world"} - container.Env = append([]corev1.EnvVar{{ - Name: "FOO", - Value: "bar", - }, { - Name: "BAZ", - Value: "blah", - }}, container.Env...) 
- container.Resources = corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - corev1.ResourceMemory: resource.MustParse("666Mi"), - corev1.ResourceCPU: resource.MustParse("666m"), - }, - Limits: corev1.ResourceList{ - corev1.ResourceMemory: resource.MustParse("888Mi"), - corev1.ResourceCPU: resource.MustParse("888m"), - }, - } - container.TerminationMessagePolicy = corev1.TerminationMessageReadFile + container.Ports[0].ContainerPort = 8888 + }, + withEnvVar("PORT", "8888"), + withEnvVar("K_CONFIGURATION", "cfg"), + withEnvVar("K_SERVICE", "svc"), + ), + sidecarContainer(sidecarContainerName, + func(container *corev1.Container) { + container.Image = "ubuntu@sha256:deadbffe" }, withEnvVar("K_CONFIGURATION", "cfg"), withEnvVar("K_SERVICE", "svc"), @@ -702,6 +995,8 @@ func TestMakePodSpec(t *testing.T) { queueContainer( withEnvVar("CONTAINER_CONCURRENCY", "1"), withEnvVar("SERVING_SERVICE", "svc"), + withEnvVar("USER_PORT", "8888"), + withEnvVar("SERVING_READINESS_PROBE", `{"tcpSocket":{"port":8888,"host":"127.0.0.1"}}`), ), }), }} @@ -738,41 +1033,73 @@ func TestMakeDeployment(t *testing.T) { }{{ name: "with concurrency=1", rev: revision("bar", "foo", + withContainers([]corev1.Container{{ + Name: servingContainerName, + Image: "busybox", + Ports: []corev1.ContainerPort{{ + ContainerPort: 8888, + }}, + ReadinessProbe: withTCPReadinessProbe(12345), + }, { + Name: sidecarContainerName, + Image: "ubuntu", + }}), withoutLabels, withContainerConcurrency(1), - func(revision *v1.Revision) { - container(revision.Spec.GetContainer(), withTCPReadinessProbe()) - }), + WithContainerStatuses([]v1.ContainerStatuses{{ + ImageDigest: "busybox@sha256:deadbeef", + }, { + ImageDigest: "ubuntu@sha256:deadbffe", + }})), want: appsv1deployment(), }, { name: "with owner", rev: revision("bar", "foo", + withContainers([]corev1.Container{{ + Name: servingContainerName, + Image: "ubuntu", + ReadinessProbe: withTCPReadinessProbe(12345), + }}), withoutLabels, 
withOwnerReference("parent-config"), - func(revision *v1.Revision) { - container(revision.Spec.GetContainer(), withTCPReadinessProbe()) - }), + WithContainerStatuses([]v1.ContainerStatuses{{ + ImageDigest: "busybox@sha256:deadbeef", + }})), want: appsv1deployment(), }, { name: "with sidecar annotation override", - rev: revision("bar", "foo", withoutLabels, func(revision *v1.Revision) { - revision.ObjectMeta.Annotations = map[string]string{ - sidecarIstioInjectAnnotation: "false", - } - container(revision.Spec.GetContainer(), withTCPReadinessProbe()) - }), + rev: revision("bar", "foo", + withContainers([]corev1.Container{{ + Name: servingContainerName, + Image: "ubuntu", + ReadinessProbe: withTCPReadinessProbe(12345), + }}), + WithContainerStatuses([]v1.ContainerStatuses{{ + ImageDigest: "busybox@sha256:deadbeef", + }}), + withoutLabels, func(revision *v1.Revision) { + revision.ObjectMeta.Annotations = map[string]string{ + sidecarIstioInjectAnnotation: "false", + } + }), want: appsv1deployment(func(deploy *appsv1.Deployment) { - deploy.ObjectMeta.Annotations[sidecarIstioInjectAnnotation] = "false" - deploy.Spec.Template.ObjectMeta.Annotations[sidecarIstioInjectAnnotation] = "false" + deploy.Annotations[sidecarIstioInjectAnnotation] = "false" + deploy.Spec.Template.Annotations[sidecarIstioInjectAnnotation] = "false" }), }, { name: "with ProgressDeadline override", dc: deployment.Config{ ProgressDeadline: 42 * time.Second, }, - rev: revision("bar", "foo", withoutLabels, func(revision *v1.Revision) { - container(revision.Spec.GetContainer(), withTCPReadinessProbe()) - }), + rev: revision("bar", "foo", + withContainers([]corev1.Container{{ + Name: servingContainerName, + Image: "ubuntu", + ReadinessProbe: withTCPReadinessProbe(12345), + }}), + WithContainerStatuses([]v1.ContainerStatuses{{ + ImageDigest: "busybox@sha256:deadbeef", + }}), withoutLabels), want: appsv1deployment(func(deploy *appsv1.Deployment) { deploy.Spec.ProgressDeadlineSeconds = ptr.Int32(42) }), diff 
--git a/pkg/reconciler/revision/resources/queue_test.go b/pkg/reconciler/revision/resources/queue_test.go index 6d421c6c6647..1a1a8cdd6788 100644 --- a/pkg/reconciler/revision/resources/queue_test.go +++ b/pkg/reconciler/revision/resources/queue_test.go @@ -56,6 +56,12 @@ var ( }, } + containers = []corev1.Container{{ + Name: servingContainerName, + Image: "busybox", + ReadinessProbe: withTCPReadinessProbe(v1.DefaultUserPort), + }} + // The default CM values. logConfig logging.Config traceConfig tracingconfig.Config @@ -77,10 +83,8 @@ func TestMakeQueueContainer(t *testing.T) { }{{ name: "no owner no autoscaler single", rev: revision("bar", "foo", - withContainerConcurrency(1), - func(revision *v1.Revision) { - container(revision.Spec.GetContainer(), withTCPReadinessProbe()) - }), + withContainers(containers), + withContainerConcurrency(1)), want: queueContainer(func(c *corev1.Container) { c.Env = env(map[string]string{ "CONTAINER_CONCURRENCY": "1", @@ -89,19 +93,15 @@ func TestMakeQueueContainer(t *testing.T) { }, { name: "custom sidecar image, container port, protocol", rev: revision("bar", "foo", - withContainerConcurrency(1), - func(revision *v1.Revision) { - revision.Spec.PodSpec = corev1.PodSpec{ - Containers: []corev1.Container{{ - Name: containerName, - ReadinessProbe: testProbe, - Ports: []corev1.ContainerPort{{ - ContainerPort: 1955, - Name: string(networking.ProtocolH2C), - }}, - }}, - } - }), + withContainers([]corev1.Container{{ + Name: servingContainerName, + ReadinessProbe: testProbe, + Ports: []corev1.ContainerPort{{ + ContainerPort: 1955, + Name: string(networking.ProtocolH2C), + }}, + }}), + withContainerConcurrency(1)), cc: deployment.Config{ QueueSidecarImage: "alpine", }, @@ -117,12 +117,12 @@ func TestMakeQueueContainer(t *testing.T) { }, { name: "service name in labels", rev: revision("bar", "foo", + withContainers(containers), withContainerConcurrency(1), func(revision *v1.Revision) { revision.ObjectMeta.Labels = map[string]string{ 
serving.ServiceLabelKey: "svc", } - container(revision.Spec.GetContainer(), withTCPReadinessProbe()) }), want: queueContainer(func(c *corev1.Container) { c.Env = env(map[string]string{ @@ -132,6 +132,7 @@ func TestMakeQueueContainer(t *testing.T) { }, { name: "config owner as env var, zero concurrency", rev: revision("blah", "baz", + withContainers(containers), withContainerConcurrency(0), func(revision *v1.Revision) { revision.ObjectMeta.OwnerReferences = []metav1.OwnerReference{{ @@ -141,7 +142,6 @@ func TestMakeQueueContainer(t *testing.T) { Controller: ptr.Bool(true), BlockOwnerDeletion: ptr.Bool(true), }} - container(revision.Spec.GetContainer(), withTCPReadinessProbe()) }), want: queueContainer(func(c *corev1.Container) { c.Env = env(map[string]string{ @@ -154,10 +154,8 @@ func TestMakeQueueContainer(t *testing.T) { }, { name: "logging configuration as env var", rev: revision("this", "log", - withContainerConcurrency(1), - func(revision *v1.Revision) { - container(revision.Spec.GetContainer(), withTCPReadinessProbe()) - }), + withContainers(containers), + withContainerConcurrency(1)), lc: logging.Config{ LoggingConfig: "The logging configuration goes here", LoggingLevel: map[string]zapcore.Level{ @@ -175,10 +173,8 @@ func TestMakeQueueContainer(t *testing.T) { }, { name: "container concurrency 10", rev: revision("bar", "foo", - withContainerConcurrency(10), - func(revision *v1.Revision) { - container(revision.Spec.GetContainer(), withTCPReadinessProbe()) - }), + withContainers(containers), + withContainerConcurrency(10)), want: queueContainer(func(c *corev1.Container) { c.Env = env(map[string]string{ "CONTAINER_CONCURRENCY": "10", @@ -187,10 +183,8 @@ func TestMakeQueueContainer(t *testing.T) { }, { name: "request log configuration as env var", rev: revision("bar", "foo", - withContainerConcurrency(1), - func(revision *v1.Revision) { - container(revision.Spec.GetContainer(), withTCPReadinessProbe()) - }), + withContainers(containers), + 
withContainerConcurrency(1)), oc: metrics.ObservabilityConfig{ RequestLogTemplate: "test template", EnableProbeRequestLog: true, @@ -204,10 +198,8 @@ func TestMakeQueueContainer(t *testing.T) { }, { name: "request metrics backend as env var", rev: revision("bar", "foo", - withContainerConcurrency(1), - func(revision *v1.Revision) { - container(revision.Spec.GetContainer(), withTCPReadinessProbe()) - }), + withContainers(containers), + withContainerConcurrency(1)), oc: metrics.ObservabilityConfig{ RequestMetricsBackend: "prometheus", }, @@ -219,10 +211,8 @@ func TestMakeQueueContainer(t *testing.T) { }, { name: "enable profiling", rev: revision("bar", "foo", - withContainerConcurrency(1), - func(revision *v1.Revision) { - container(revision.Spec.GetContainer(), withTCPReadinessProbe()) - }), + withContainers(containers), + withContainerConcurrency(1)), oc: metrics.ObservabilityConfig{EnableProfiling: true}, want: queueContainer(func(c *corev1.Container) { c.Env = env(map[string]string{ @@ -233,14 +223,11 @@ func TestMakeQueueContainer(t *testing.T) { }, { name: "custom TimeoutSeconds", rev: revision("bar", "foo", - withContainerConcurrency(1), - func(revision *v1.Revision) { - container(revision.Spec.GetContainer(), withTCPReadinessProbe()) - revision.Spec.TimeoutSeconds = ptr.Int64(99) - }), + withContainers(containers), + withContainerConcurrency(1)), want: queueContainer(func(c *corev1.Container) { c.Env = env(map[string]string{ - "REVISION_TIMEOUT_SECONDS": "99", + "REVISION_TIMEOUT_SECONDS": "45", }) }), }} @@ -250,7 +237,7 @@ func TestMakeQueueContainer(t *testing.T) { if len(test.rev.Spec.PodSpec.Containers) == 0 { test.rev.Spec.PodSpec = corev1.PodSpec{ Containers: []corev1.Container{{ - Name: containerName, + Name: servingContainerName, ReadinessProbe: testProbe, }}, } @@ -287,7 +274,7 @@ func TestMakeQueueContainerWithPercentageAnnotation(t *testing.T) { serving.QueueSideCarResourcePercentageAnnotation: "20", } revision.Spec.PodSpec.Containers = 
[]corev1.Container{{ - Name: containerName, + Name: servingContainerName, ReadinessProbe: testProbe, Resources: corev1.ResourceRequirements{ Limits: corev1.ResourceList{ @@ -313,7 +300,7 @@ func TestMakeQueueContainerWithPercentageAnnotation(t *testing.T) { serving.QueueSideCarResourcePercentageAnnotation: "0.2", } revision.Spec.PodSpec.Containers = []corev1.Container{{ - Name: containerName, + Name: servingContainerName, ReadinessProbe: testProbe, Resources: corev1.ResourceRequirements{ Requests: corev1.ResourceList{ @@ -339,7 +326,7 @@ func TestMakeQueueContainerWithPercentageAnnotation(t *testing.T) { serving.QueueSideCarResourcePercentageAnnotation: "foo", } revision.Spec.PodSpec.Containers = []corev1.Container{{ - Name: containerName, + Name: servingContainerName, ReadinessProbe: testProbe, Resources: corev1.ResourceRequirements{ Requests: corev1.ResourceList{ @@ -364,7 +351,7 @@ func TestMakeQueueContainerWithPercentageAnnotation(t *testing.T) { serving.QueueSideCarResourcePercentageAnnotation: "100", } revision.Spec.PodSpec.Containers = []corev1.Container{{ - Name: containerName, + Name: servingContainerName, ReadinessProbe: testProbe, Resources: corev1.ResourceRequirements{ Requests: corev1.ResourceList{ @@ -406,7 +393,7 @@ func TestProbeGenerationHTTPDefaults(t *testing.T) { withContainerConcurrency(1), func(revision *v1.Revision) { revision.Spec.PodSpec.Containers = []corev1.Container{{ - Name: containerName, + Name: servingContainerName, ReadinessProbe: &corev1.Probe{ Handler: corev1.Handler{ HTTPGet: &corev1.HTTPGetAction{ @@ -477,7 +464,7 @@ func TestProbeGenerationHTTP(t *testing.T) { withContainerConcurrency(1), func(revision *v1.Revision) { revision.Spec.PodSpec.Containers = []corev1.Container{{ - Name: containerName, + Name: servingContainerName, Ports: []corev1.ContainerPort{{ ContainerPort: userPort, }}, @@ -565,7 +552,7 @@ func TestTCPProbeGeneration(t *testing.T) { TimeoutSeconds: ptr.Int64(45), PodSpec: corev1.PodSpec{ Containers: 
[]corev1.Container{{ - Name: containerName, + Name: servingContainerName, Ports: []corev1.ContainerPort{{ ContainerPort: userPort, }}, @@ -601,7 +588,7 @@ func TestTCPProbeGeneration(t *testing.T) { TimeoutSeconds: ptr.Int64(45), PodSpec: corev1.PodSpec{ Containers: []corev1.Container{{ - Name: containerName, + Name: servingContainerName, ReadinessProbe: &corev1.Probe{ Handler: corev1.Handler{ TCPSocket: &corev1.TCPSocketAction{}, @@ -656,7 +643,7 @@ func TestTCPProbeGeneration(t *testing.T) { TimeoutSeconds: ptr.Int64(45), PodSpec: corev1.PodSpec{ Containers: []corev1.Container{{ - Name: containerName, + Name: servingContainerName, Ports: []corev1.ContainerPort{{ ContainerPort: userPort, }}, diff --git a/pkg/reconciler/revision/table_test.go b/pkg/reconciler/revision/table_test.go index 4a12c0599a0e..f128bf703903 100644 --- a/pkg/reconciler/revision/table_test.go +++ b/pkg/reconciler/revision/table_test.go @@ -85,9 +85,8 @@ func TestReconcile(t *testing.T) { WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ Object: rev("foo", "first-reconcile", // The first reconciliation Populates the following status properties. - WithLogURL, AllUnknownConditions, MarkDeploying("Deploying"), WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "first-reconcile"}, - })), + WithLogURL, AllUnknownConditions, MarkDeploying("Deploying"), + withDefaultContainerStatuses()), }}, Key: "foo/first-reconcile", }, { @@ -110,9 +109,8 @@ func TestReconcile(t *testing.T) { WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ Object: rev("foo", "update-status-failure", // Despite failure, the following status properties are set. 
- WithLogURL, AllUnknownConditions, MarkDeploying("Deploying"), WithContainerStatuses([]v1.ContainerStatuses{ - {Name: "update-status-failure"}, - })), + WithLogURL, AllUnknownConditions, MarkDeploying("Deploying"), + withDefaultContainerStatuses()), }}, WantEvents: []string{ Eventf(corev1.EventTypeWarning, "UpdateFailed", "Failed to update status for %q: %v", @@ -140,7 +138,7 @@ func TestReconcile(t *testing.T) { Object: rev("foo", "create-pa-failure", // Despite failure, the following status properties are set. WithLogURL, WithInitRevConditions, - MarkDeploying("Deploying"), WithContainerStatuses([]v1.ContainerStatuses{{Name: "create-pa-failure"}})), + MarkDeploying("Deploying"), withDefaultContainerStatuses()), }}, WantEvents: []string{ Eventf(corev1.EventTypeWarning, "InternalError", `failed to create PA "create-pa-failure": inducing failure for create podautoscalers`), @@ -166,7 +164,7 @@ func TestReconcile(t *testing.T) { Object: rev("foo", "create-user-deploy-failure", // Despite failure, the following status properties are set. WithLogURL, WithInitRevConditions, - MarkDeploying("Deploying"), WithContainerStatuses([]v1.ContainerStatuses{{Name: "create-user-deploy-failure"}})), + MarkDeploying("Deploying"), withDefaultContainerStatuses()), }}, WantEvents: []string{ Eventf(corev1.EventTypeWarning, "InternalError", @@ -181,7 +179,7 @@ func TestReconcile(t *testing.T) { // are necessary. Objects: []runtime.Object{ rev("foo", "stable-reconcile", WithLogURL, AllUnknownConditions, - WithContainerStatuses([]v1.ContainerStatuses{{Name: "stable-reconcile"}})), + withDefaultContainerStatuses()), pa("foo", "stable-reconcile", WithReachability(asv1a1.ReachabilityUnknown)), deploy(t, "foo", "stable-reconcile"), @@ -195,7 +193,7 @@ func TestReconcile(t *testing.T) { // with our desired spec. 
Objects: []runtime.Object{ rev("foo", "fix-containers", - WithLogURL, AllUnknownConditions, WithContainerStatuses([]v1.ContainerStatuses{{Name: "fix-containers"}})), + WithLogURL, AllUnknownConditions, withDefaultContainerStatuses()), pa("foo", "fix-containers", WithReachability(asv1a1.ReachabilityUnknown)), changeContainers(deploy(t, "foo", "fix-containers")), image("foo", "fix-containers"), @@ -213,7 +211,8 @@ func TestReconcile(t *testing.T) { }, Objects: []runtime.Object{ rev("foo", "failure-update-deploy", - withK8sServiceName("whateves"), WithLogURL, AllUnknownConditions, WithContainerStatuses([]v1.ContainerStatuses{{Name: "failure-update-deploy"}})), + withK8sServiceName("whateves"), WithLogURL, AllUnknownConditions, + withDefaultContainerStatuses()), pa("foo", "failure-update-deploy"), changeContainers(deploy(t, "foo", "failure-update-deploy")), image("foo", "failure-update-deploy"), @@ -234,7 +233,8 @@ func TestReconcile(t *testing.T) { Objects: []runtime.Object{ rev("foo", "stable-deactivation", WithLogURL, MarkRevisionReady, - MarkInactive("NoTraffic", "This thing is inactive."), WithContainerStatuses([]v1.ContainerStatuses{{Name: "stable-deactivation"}})), + MarkInactive("NoTraffic", "This thing is inactive."), + withDefaultContainerStatuses()), pa("foo", "stable-deactivation", WithNoTraffic("NoTraffic", "This thing is inactive.")), deploy(t, "foo", "stable-deactivation"), @@ -255,7 +255,7 @@ func TestReconcile(t *testing.T) { WithLogURL, // When the endpoint and pa are ready, then we will see the // Revision become ready. 
- MarkRevisionReady, WithContainerStatuses([]v1.ContainerStatuses{{Name: "pa-ready"}})), + MarkRevisionReady, withDefaultContainerStatuses()), }}, WantEvents: []string{ Eventf(corev1.EventTypeNormal, "RevisionReady", "Revision becomes ready upon all resources being ready"), @@ -276,7 +276,7 @@ func TestReconcile(t *testing.T) { }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ Object: rev("foo", "pa-not-ready", - WithLogURL, MarkRevisionReady, WithContainerStatuses([]v1.ContainerStatuses{{Name: "pa-not-ready"}}), + WithLogURL, MarkRevisionReady, withDefaultContainerStatuses(), withK8sServiceName("its-not-confidential"), // When we reconcile a ready state and our pa is in an activating // state, we should see the following mutation. @@ -297,7 +297,7 @@ func TestReconcile(t *testing.T) { }, WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ Object: rev("foo", "pa-inactive", - WithLogURL, MarkRevisionReady, WithContainerStatuses([]v1.ContainerStatuses{{Name: "pa-inactive"}}), + WithLogURL, MarkRevisionReady, withDefaultContainerStatuses(), // When we reconcile an "all ready" revision when the PA // is inactive, we should see the following change. MarkInactive("NoTraffic", "This thing is inactive.")), @@ -322,7 +322,8 @@ func TestReconcile(t *testing.T) { withK8sServiceName("pa-inactive-svc"), // When we reconcile an "all ready" revision when the PA // is inactive, we should see the following change. - MarkInactive("NoTraffic", "This thing is inactive."), WithContainerStatuses([]v1.ContainerStatuses{{Name: "pa-inactive"}})), + MarkInactive("NoTraffic", "This thing is inactive."), + withDefaultContainerStatuses()), }}, Key: "foo/pa-inactive", }, { @@ -343,7 +344,8 @@ func TestReconcile(t *testing.T) { WithLogURL, AllUnknownConditions, // When our reconciliation has to change the service // we should see the following mutations to status. 
- withK8sServiceName("fix-mutated-pa"), WithLogURL, MarkRevisionReady, WithContainerStatuses([]v1.ContainerStatuses{{Name: "fix-mutated-pa"}})), + withK8sServiceName("fix-mutated-pa"), WithLogURL, MarkRevisionReady, + withDefaultContainerStatuses()), }}, WantUpdates: []clientgotesting.UpdateActionImpl{{ Object: pa("foo", "fix-mutated-pa", WithTraffic, @@ -356,7 +358,7 @@ func TestReconcile(t *testing.T) { Objects: []runtime.Object{ rev("foo", "fix-mutated-pa-fail", withK8sServiceName("some-old-stuff"), - WithLogURL, AllUnknownConditions, WithContainerStatuses([]v1.ContainerStatuses{{Name: "fix-mutated-pa-fail"}})), + WithLogURL, AllUnknownConditions, withDefaultContainerStatuses()), pa("foo", "fix-mutated-pa-fail", WithProtocolType(networking.ProtocolH2C), WithReachability(asv1a1.ReachabilityUnknown)), deploy(t, "foo", "fix-mutated-pa-fail"), image("foo", "fix-mutated-pa-fail"), @@ -391,7 +393,7 @@ func TestReconcile(t *testing.T) { WithLogURL, AllUnknownConditions, // When the revision is reconciled after a Deployment has // timed out, we should see it marked with the PDE state. - MarkProgressDeadlineExceeded("I timed out!"), WithContainerStatuses([]v1.ContainerStatuses{{Name: "deploy-timeout"}})), + MarkProgressDeadlineExceeded("I timed out!"), withDefaultContainerStatuses()), }}, Key: "foo/deploy-timeout", }, { @@ -412,150 +414,150 @@ func TestReconcile(t *testing.T) { WithLogURL, AllUnknownConditions, // When the revision is reconciled after a Deployment has // timed out, we should see it marked with the FailedCreate state. - MarkResourcesUnavailable("FailedCreate", "I replica failed!"), WithContainerStatuses([]v1.ContainerStatuses{{Name: "deploy-replica-failure"}})), + MarkResourcesUnavailable("FailedCreate", "I replica failed!"), + withDefaultContainerStatuses()), }}, Key: "foo/deploy-replica-failure", - }, - { - Name: "surface ImagePullBackoff", - // Test the propagation of ImagePullBackoff from user container. 
- Objects: []runtime.Object{ - rev("foo", "pull-backoff", - withK8sServiceName("the-taxman"), WithLogURL, MarkActivating("Deploying", ""), WithContainerStatuses([]v1.ContainerStatuses{{Name: "pull-backoff"}})), - pa("foo", "pull-backoff", WithReachability(asv1a1.ReachabilityUnknown)), // pa can't be ready since deployment times out. - pod(t, "foo", "pull-backoff", WithWaitingContainer("pull-backoff", "ImagePullBackoff", "can't pull it")), - timeoutDeploy(deploy(t, "foo", "pull-backoff"), "Timed out!"), - image("foo", "pull-backoff"), - }, - WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - Object: rev("foo", "pull-backoff", - WithLogURL, AllUnknownConditions, - MarkResourcesUnavailable("ImagePullBackoff", "can't pull it"), WithContainerStatuses([]v1.ContainerStatuses{{Name: "pull-backoff"}})), - }}, - WantUpdates: []clientgotesting.UpdateActionImpl{{ - Object: pa("foo", "pull-backoff", WithReachability(asv1a1.ReachabilityUnreachable)), - }}, - Key: "foo/pull-backoff", - }, { - Name: "surface pod errors", - // Test the propagation of the termination state of a Pod into the revision. - // This initializes the world to the stable state after its first reconcile, - // but changes the user deployment to have a failing pod. It then verifies - // that Reconcile propagates this into the status of the Revision. - Objects: []runtime.Object{ - rev("foo", "pod-error", - withK8sServiceName("a-pod-error"), WithLogURL, AllUnknownConditions, MarkActive), - pa("foo", "pod-error"), // PA can't be ready, since no traffic. 
- pod(t, "foo", "pod-error", WithFailingContainer("pod-error", 5, "I failed man!")), - deploy(t, "foo", "pod-error"), - image("foo", "pod-error"), - }, - WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - Object: rev("foo", "pod-error", - WithLogURL, AllUnknownConditions, MarkContainerExiting(5, - v1.RevisionContainerExitingMessage("I failed man!")), WithContainerStatuses([]v1.ContainerStatuses{{Name: "pod-error"}})), - }}, - Key: "foo/pod-error", - }, { - Name: "surface pod schedule errors", - // Test the propagation of the scheduling errors of Pod into the revision. - // This initializes the world to unschedule pod. It then verifies - // that Reconcile propagates this into the status of the Revision. - Objects: []runtime.Object{ - rev("foo", "pod-schedule-error", - withK8sServiceName("a-pod-schedule-error"), WithLogURL, AllUnknownConditions, MarkActive), - pa("foo", "pod-schedule-error"), // PA can't be ready, since no traffic. - pod(t, "foo", "pod-schedule-error", WithUnschedulableContainer("Insufficient energy", "Unschedulable")), - deploy(t, "foo", "pod-schedule-error"), - image("foo", "pod-schedule-error"), - }, - WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - Object: rev("foo", "pod-schedule-error", - WithLogURL, AllUnknownConditions, MarkResourcesUnavailable("Insufficient energy", - "Unschedulable"), WithContainerStatuses([]v1.ContainerStatuses{{Name: "pod-schedule-error"}})), - }}, - Key: "foo/pod-schedule-error", - }, { - Name: "ready steady state", - // Test the transition that Reconcile makes when Endpoints become ready on the - // SKS owned services, which is signalled by pa having service name. - // This puts the world into the stable post-reconcile state for an Active - // Revision. It then creates an Endpoints resource with active subsets. - // This signal should make our Reconcile mark the Revision as Ready. 
- Objects: []runtime.Object{ - rev("foo", "steady-ready", withK8sServiceName("very-steady"), WithLogURL), - pa("foo", "steady-ready", WithTraffic, WithPAStatusService("steadier-even")), - deploy(t, "foo", "steady-ready"), - image("foo", "steady-ready"), - }, - WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - Object: rev("foo", "steady-ready", withK8sServiceName("steadier-even"), WithLogURL, - // All resources are ready to go, we should see the revision being - // marked ready - MarkRevisionReady, WithContainerStatuses([]v1.ContainerStatuses{{Name: "steady-ready"}})), - }}, - WantEvents: []string{ - Eventf(corev1.EventTypeNormal, "RevisionReady", "Revision becomes ready upon all resources being ready"), - }, - Key: "foo/steady-ready", - }, { - Name: "lost pa owner ref", - WantErr: true, - Objects: []runtime.Object{ - rev("foo", "missing-owners", withK8sServiceName("lesser-revision"), WithLogURL, - MarkRevisionReady), - pa("foo", "missing-owners", WithTraffic, WithPodAutoscalerOwnersRemoved), - deploy(t, "foo", "missing-owners"), - image("foo", "missing-owners"), - }, - WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - Object: rev("foo", "missing-owners", withK8sServiceName("lesser-revision"), WithLogURL, - MarkRevisionReady, - // When we're missing the OwnerRef for PodAutoscaler we see this update. 
- MarkResourceNotOwned("PodAutoscaler", "missing-owners"), WithContainerStatuses([]v1.ContainerStatuses{{Name: "missing-owners"}})), - }}, - WantEvents: []string{ - Eventf(corev1.EventTypeWarning, "InternalError", `revision: "missing-owners" does not own PodAutoscaler: "missing-owners"`), - }, - Key: "foo/missing-owners", - }, { - Name: "lost deployment owner ref", - WantErr: true, - Objects: []runtime.Object{ - rev("foo", "missing-owners", withK8sServiceName("youre-gonna-lose"), WithLogURL, - MarkRevisionReady, WithContainerStatuses([]v1.ContainerStatuses{{Name: "missing-owners"}})), - pa("foo", "missing-owners", WithTraffic), - noOwner(deploy(t, "foo", "missing-owners")), - image("foo", "missing-owners"), - }, - WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - Object: rev("foo", "missing-owners", withK8sServiceName("youre-gonna-lose"), WithLogURL, - MarkRevisionReady, - // When we're missing the OwnerRef for Deployment we see this update. - MarkResourceNotOwned("Deployment", "missing-owners-deployment"), WithContainerStatuses([]v1.ContainerStatuses{{Name: "missing-owners"}})), - }}, - WantEvents: []string{ - Eventf(corev1.EventTypeWarning, "InternalError", `revision: "missing-owners" does not own Deployment: "missing-owners-deployment"`), - }, - Key: "foo/missing-owners", - }, { - Name: "image pull secrets", - // This test case tests that the image pull secrets from revision propagate to deployment and image - Objects: []runtime.Object{ - rev("foo", "image-pull-secrets", WithImagePullSecrets("foo-secret")), - }, - WantCreates: []runtime.Object{ - pa("foo", "image-pull-secrets"), - deployImagePullSecrets(deploy(t, "foo", "image-pull-secrets"), "foo-secret"), - imagePullSecrets(image("foo", "image-pull-secrets"), "foo-secret"), - }, - WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - Object: rev("foo", "image-pull-secrets", - WithImagePullSecrets("foo-secret"), - WithLogURL, AllUnknownConditions, MarkDeploying("Deploying"), 
WithContainerStatuses([]v1.ContainerStatuses{{Name: "image-pull-secrets"}})), - }}, - Key: "foo/image-pull-secrets", - }} + }, { + Name: "surface ImagePullBackoff", + // Test the propagation of ImagePullBackoff from user container. + Objects: []runtime.Object{ + rev("foo", "pull-backoff", + withK8sServiceName("the-taxman"), WithLogURL, MarkActivating("Deploying", "")), + pa("foo", "pull-backoff", WithReachability(asv1a1.ReachabilityUnknown)), // pa can't be ready since deployment times out. + pod(t, "foo", "pull-backoff", WithWaitingContainer("pull-backoff", "ImagePullBackoff", "can't pull it")), + timeoutDeploy(deploy(t, "foo", "pull-backoff"), "Timed out!"), + image("foo", "pull-backoff"), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: rev("foo", "pull-backoff", + WithLogURL, AllUnknownConditions, + MarkResourcesUnavailable("ImagePullBackoff", "can't pull it"), withDefaultContainerStatuses()), + }}, + WantUpdates: []clientgotesting.UpdateActionImpl{{ + Object: pa("foo", "pull-backoff", WithReachability(asv1a1.ReachabilityUnreachable)), + }}, + Key: "foo/pull-backoff", + }, { + Name: "surface pod errors", + // Test the propagation of the termination state of a Pod into the revision. + // This initializes the world to the stable state after its first reconcile, + // but changes the user deployment to have a failing pod. It then verifies + // that Reconcile propagates this into the status of the Revision. + Objects: []runtime.Object{ + rev("foo", "pod-error", + withK8sServiceName("a-pod-error"), WithLogURL, AllUnknownConditions, MarkActive), + pa("foo", "pod-error"), // PA can't be ready, since no traffic. 
+ pod(t, "foo", "pod-error", WithFailingContainer("pod-error", 5, "I failed man!")), + deploy(t, "foo", "pod-error"), + image("foo", "pod-error"), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: rev("foo", "pod-error", + WithLogURL, AllUnknownConditions, MarkContainerExiting(5, + v1.RevisionContainerExitingMessage("I failed man!")), withDefaultContainerStatuses()), + }}, + Key: "foo/pod-error", + }, { + Name: "surface pod schedule errors", + // Test the propagation of the scheduling errors of Pod into the revision. + // This initializes the world to unschedule pod. It then verifies + // that Reconcile propagates this into the status of the Revision. + Objects: []runtime.Object{ + rev("foo", "pod-schedule-error", + withK8sServiceName("a-pod-schedule-error"), WithLogURL, AllUnknownConditions, MarkActive), + pa("foo", "pod-schedule-error"), // PA can't be ready, since no traffic. + pod(t, "foo", "pod-schedule-error", WithUnschedulableContainer("Insufficient energy", "Unschedulable")), + deploy(t, "foo", "pod-schedule-error"), + image("foo", "pod-schedule-error"), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: rev("foo", "pod-schedule-error", + WithLogURL, AllUnknownConditions, MarkResourcesUnavailable("Insufficient energy", + "Unschedulable"), withDefaultContainerStatuses()), + }}, + Key: "foo/pod-schedule-error", + }, { + Name: "ready steady state", + // Test the transition that Reconcile makes when Endpoints become ready on the + // SKS owned services, which is signalled by pa having service name. + // This puts the world into the stable post-reconcile state for an Active + // Revision. It then creates an Endpoints resource with active subsets. + // This signal should make our Reconcile mark the Revision as Ready. 
+ Objects: []runtime.Object{ + rev("foo", "steady-ready", withK8sServiceName("very-steady"), WithLogURL), + pa("foo", "steady-ready", WithTraffic, WithPAStatusService("steadier-even")), + deploy(t, "foo", "steady-ready"), + image("foo", "steady-ready"), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: rev("foo", "steady-ready", withK8sServiceName("steadier-even"), WithLogURL, + // All resources are ready to go, we should see the revision being + // marked ready + MarkRevisionReady, withDefaultContainerStatuses()), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "RevisionReady", "Revision becomes ready upon all resources being ready"), + }, + Key: "foo/steady-ready", + }, { + Name: "lost pa owner ref", + WantErr: true, + Objects: []runtime.Object{ + rev("foo", "missing-owners", withK8sServiceName("lesser-revision"), WithLogURL, + MarkRevisionReady), + pa("foo", "missing-owners", WithTraffic, WithPodAutoscalerOwnersRemoved), + deploy(t, "foo", "missing-owners"), + image("foo", "missing-owners"), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: rev("foo", "missing-owners", withK8sServiceName("lesser-revision"), WithLogURL, + MarkRevisionReady, + // When we're missing the OwnerRef for PodAutoscaler we see this update. 
+ MarkResourceNotOwned("PodAutoscaler", "missing-owners"), withDefaultContainerStatuses()), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "InternalError", `revision: "missing-owners" does not own PodAutoscaler: "missing-owners"`), + }, + Key: "foo/missing-owners", + }, { + Name: "lost deployment owner ref", + WantErr: true, + Objects: []runtime.Object{ + rev("foo", "missing-owners", withK8sServiceName("youre-gonna-lose"), WithLogURL, + MarkRevisionReady), + pa("foo", "missing-owners", WithTraffic), + noOwner(deploy(t, "foo", "missing-owners")), + image("foo", "missing-owners"), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: rev("foo", "missing-owners", withK8sServiceName("youre-gonna-lose"), WithLogURL, + MarkRevisionReady, + // When we're missing the OwnerRef for Deployment we see this update. + MarkResourceNotOwned("Deployment", "missing-owners-deployment"), withDefaultContainerStatuses()), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "InternalError", `revision: "missing-owners" does not own Deployment: "missing-owners-deployment"`), + }, + Key: "foo/missing-owners", + }, { + Name: "image pull secrets", + // This test case tests that the image pull secrets from the revision propagate to the deployment and the image. + Objects: []runtime.Object{ + rev("foo", "image-pull-secrets", WithImagePullSecrets("foo-secret")), + }, + WantCreates: []runtime.Object{ + pa("foo", "image-pull-secrets"), + deployImagePullSecrets(deploy(t, "foo", "image-pull-secrets"), "foo-secret"), + imagePullSecrets(image("foo", "image-pull-secrets"), "foo-secret"), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: rev("foo", "image-pull-secrets", + WithImagePullSecrets("foo-secret"), + WithLogURL, AllUnknownConditions, MarkDeploying("Deploying"), withDefaultContainerStatuses()), + }}, + Key: "foo/image-pull-secrets", + }} table.Test(t, MakeFactory(func(ctx context.Context, listers *Listers, cmw configmap.Watcher) 
controller.Reconciler { r := &Reconciler{ @@ -661,6 +663,15 @@ func withK8sServiceName(sn string) RevisionOption { } } +func withDefaultContainerStatuses() RevisionOption { + return func(r *v1.Revision) { + r.Status.ContainerStatuses = []v1.ContainerStatuses{{ + Name: r.Name, + ImageDigest: "", + }} + } +} + // TODO(mattmoor): Come up with a better name for this. func AllUnknownConditions(r *v1.Revision) { WithInitRevConditions(r) diff --git a/pkg/webhook/podspec_dryrun.go b/pkg/webhook/podspec_dryrun.go index 7e8234782a12..e810d095c0b0 100644 --- a/pkg/webhook/podspec_dryrun.go +++ b/pkg/webhook/podspec_dryrun.go @@ -57,10 +57,9 @@ func validatePodSpec(ctx context.Context, ps v1.RevisionSpec, namespace string, Spec: ps, } rev.SetDefaults(ctx) - userContainer := resources.BuildUserContainer(rev) - podSpec := resources.BuildPodSpec(rev, []corev1.Container{*userContainer}) + podSpec := resources.BuildPodSpec(rev, resources.BuildUserContainers(rev)) - // Make a dummy pod with the template Revions & PodSpec and dryrun call to API-server + // Make a dummy pod with the template Revisions & PodSpec and dryrun call to API-server pod := &corev1.Pod{ ObjectMeta: om, Spec: *podSpec,