diff --git a/pkg/controller/revision/autoscaler.go b/pkg/controller/revision/autoscaler.go index 8d8abb53a1d2..135fe4f7d73a 100644 --- a/pkg/controller/revision/autoscaler.go +++ b/pkg/controller/revision/autoscaler.go @@ -34,22 +34,15 @@ import ( // MakeServingAutoscalerDeployment creates the deployment of the // autoscaler for a particular revision. -func MakeServingAutoscalerDeployment(rev *v1alpha1.Revision, autoscalerImage string) *appsv1.Deployment { +func MakeServingAutoscalerDeployment(rev *v1alpha1.Revision, autoscalerImage string, replicaCount int32) *appsv1.Deployment { configName := "" if owner := metav1.GetControllerOf(rev); owner != nil && owner.Kind == "Configuration" { configName = owner.Name } - rollingUpdateConfig := appsv1.RollingUpdateDeployment{ - MaxUnavailable: &intstr.IntOrString{Type: intstr.Int, IntVal: 1}, - MaxSurge: &intstr.IntOrString{Type: intstr.Int, IntVal: 1}, - } - annotations := MakeServingResourceAnnotations(rev) annotations[sidecarIstioInjectAnnotation] = "true" - replicas := int32(1) - const autoscalerConfigName = "config-autoscaler" autoscalerConfigVolume := corev1.Volume{ Name: autoscalerConfigName, @@ -83,12 +76,8 @@ func MakeServingAutoscalerDeployment(rev *v1alpha1.Revision, autoscalerImage str OwnerReferences: []metav1.OwnerReference{*controller.NewRevisionControllerRef(rev)}, }, Spec: appsv1.DeploymentSpec{ - Replicas: &replicas, + Replicas: &replicaCount, Selector: MakeServingResourceSelector(rev), - Strategy: appsv1.DeploymentStrategy{ - Type: "RollingUpdate", - RollingUpdate: &rollingUpdateConfig, - }, Template: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Labels: makeServingAutoScalerLabels(rev), diff --git a/pkg/controller/revision/pod.go b/pkg/controller/revision/pod.go index 6c9d4ecf6e00..fd3b4a7dc105 100644 --- a/pkg/controller/revision/pod.go +++ b/pkg/controller/revision/pod.go @@ -205,12 +205,7 @@ func MakeServingPodSpec(rev *v1alpha1.Revision, controllerConfig *ControllerConf // 
MakeServingDeployment creates a deployment. func MakeServingDeployment(logger *zap.SugaredLogger, rev *v1alpha1.Revision, - networkConfig *NetworkConfig, controllerConfig *ControllerConfig) *appsv1.Deployment { - rollingUpdateConfig := appsv1.RollingUpdateDeployment{ - MaxUnavailable: &servingPodMaxUnavailable, - MaxSurge: &servingPodMaxSurge, - } - + networkConfig *NetworkConfig, controllerConfig *ControllerConfig, replicaCount int32) *appsv1.Deployment { podTemplateAnnotations := MakeServingResourceAnnotations(rev) podTemplateAnnotations[sidecarIstioInjectAnnotation] = "true" @@ -241,12 +236,8 @@ func MakeServingDeployment(logger *zap.SugaredLogger, rev *v1alpha1.Revision, OwnerReferences: []metav1.OwnerReference{*controller.NewRevisionControllerRef(rev)}, }, Spec: appsv1.DeploymentSpec{ - Replicas: &servingPodReplicaCount, - Selector: MakeServingResourceSelector(rev), - Strategy: appsv1.DeploymentStrategy{ - Type: "RollingUpdate", - RollingUpdate: &rollingUpdateConfig, - }, + Replicas: &replicaCount, + Selector: MakeServingResourceSelector(rev), ProgressDeadlineSeconds: &progressDeadlineSeconds, Template: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ diff --git a/pkg/controller/revision/revision.go b/pkg/controller/revision/revision.go index 86dfdd8dbe49..9f266d0b6d66 100644 --- a/pkg/controller/revision/revision.go +++ b/pkg/controller/revision/revision.go @@ -32,12 +32,14 @@ import ( "github.com/knative/serving/pkg" "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" "github.com/josephburnett/k8sflag/pkg/k8sflag" "github.com/knative/serving/pkg/apis/serving" "github.com/knative/serving/pkg/logging" "github.com/knative/serving/pkg/logging/logkey" "go.uber.org/zap" "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/resource" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -50,10 +52,8 @@ import ( appsv1informers "k8s.io/client-go/informers/apps/v1" corev1informers "k8s.io/client-go/informers/core/v1" - 
"k8s.io/apimachinery/pkg/api/errors" apierrs "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/runtime" appsv1listers "k8s.io/client-go/listers/apps/v1" corev1listers "k8s.io/client-go/listers/core/v1" @@ -88,11 +88,8 @@ const ( ) var ( - servingPodReplicaCount = int32(1) - servingPodMaxUnavailable = intstr.IntOrString{Type: intstr.Int, IntVal: 1} - servingPodMaxSurge = intstr.IntOrString{Type: intstr.Int, IntVal: 1} - foregroundDeletion = metav1.DeletePropagationForeground - fgDeleteOptions = &metav1.DeleteOptions{ + foregroundDeletion = metav1.DeletePropagationForeground + fgDeleteOptions = &metav1.DeleteOptions{ PropagationPolicy: &foregroundDeletion, } ) @@ -273,7 +270,7 @@ func (c *Controller) Reconcile(key string) error { // Get the Revision resource with this namespace/name original, err := c.revisionLister.Revisions(namespace).Get(name) // The resource may no longer exist, in which case we stop processing. - if errors.IsNotFound(err) { + if apierrs.IsNotFound(err) { runtime.HandleError(fmt.Errorf("revision %q in work queue no longer exists", key)) return nil } else if err != nil { @@ -400,43 +397,40 @@ func (c *Controller) EnqueueEndpointsRevision(obj interface{}) { } func (c *Controller) reconcileDeployment(ctx context.Context, rev *v1alpha1.Revision) error { - logger := logging.FromContext(ctx) ns := controller.GetServingNamespaceName(rev.Namespace) deploymentName := controller.GetRevisionDeploymentName(rev) + logger := logging.FromContext(ctx).With(zap.String(logkey.Deployment, deploymentName)) - deployment, err := c.deploymentLister.Deployments(ns).Get(deploymentName) + deployment, getDepErr := c.deploymentLister.Deployments(ns).Get(deploymentName) switch rev.Spec.ServingState { - case v1alpha1.RevisionServingStateActive: - // When Active, the Deployment should exist and have a particular specification. 
- if apierrs.IsNotFound(err) { - // If it does not exist, then create it. + case v1alpha1.RevisionServingStateActive, v1alpha1.RevisionServingStateReserve: + // When Active or Reserved, deployment should exist and have a particular specification. + if apierrs.IsNotFound(getDepErr) { + // Deployment does not exist. Create it. rev.Status.MarkDeploying("Deploying") + var err error deployment, err = c.createDeployment(ctx, rev) if err != nil { - logger.Errorf("Error creating Deployment %q: %v", deploymentName, err) + logger.Errorf("Error creating deployment %q: %v", deploymentName, err) return err } - logger.Infof("Created Deployment %q", deploymentName) - } else if err != nil { - logger.Errorf("Error reconciling Active Deployment %q: %v", deploymentName, err) - return err + logger.Infof("Created deployment %q", deploymentName) + } else if getDepErr != nil { + logger.Errorf("Error reconciling deployment %q: %v", deploymentName, getDepErr) + return getDepErr } else { - // TODO(mattmoor): Don't reconcile Deployments until we can avoid fighting with - // its defaulter. - // // If it exists, then make sure if looks as we expect. - // // It may change if a user edits things around our controller, which we - // // should not allow, or if our expectations of how the deployment should look - // // changes (e.g. we update our controller with new sidecars). - // var changed Changed - // deployment, changed, err = c.checkAndUpdateDeployment(ctx, rev, deployment) - // if err != nil { - // logger.Errorf("Error updating Deployment %q: %v", deploymentName, err) - // return err - // } - // if changed == WasChanged { - // logger.Infof("Updated Deployment %q", deploymentName) - // rev.Status.MarkDeploying("Updating") - // } + // Deployment exist. 
Update the replica count based on the serving state if necessary + var changed Changed + var err error + deployment, changed, err = c.checkAndUpdateDeployment(ctx, rev, deployment) + if err != nil { + logger.Errorf("Error updating deployment %q: %v", deploymentName, err) + return err + } + if changed == WasChanged { + logger.Infof("Updated deployment %q", deploymentName) + rev.Status.MarkDeploying("Updating") + } } // Now that we have a Deployment, determine whether there is any relevant @@ -449,17 +443,17 @@ func (c *Controller) reconcileDeployment(ctx context.Context, rev *v1alpha1.Revi } return nil - case v1alpha1.RevisionServingStateReserve, v1alpha1.RevisionServingStateRetired: - // When Reserve or Retired, we remove the underlying Deployment. - if apierrs.IsNotFound(err) { + case v1alpha1.RevisionServingStateRetired: + // When Retired, we remove the underlying Deployment. + if apierrs.IsNotFound(getDepErr) { // If it does not exist, then we have nothing to do. return nil } if err := c.deleteDeployment(ctx, deployment); err != nil { - logger.Errorf("Error deleting Deployment %q: %v", deploymentName, err) + logger.Errorf("Error deleting deployment %q: %v", deploymentName, err) return err } - logger.Infof("Deleted Deployment %q", deploymentName) + logger.Infof("Deleted deployment %q", deploymentName) rev.Status.MarkInactive() return nil @@ -471,10 +465,12 @@ func (c *Controller) reconcileDeployment(ctx context.Context, rev *v1alpha1.Revi func (c *Controller) createDeployment(ctx context.Context, rev *v1alpha1.Revision) (*appsv1.Deployment, error) { logger := logging.FromContext(ctx) - ns := controller.GetServingNamespaceName(rev.Namespace) - // Create the deployment. 
- deployment := MakeServingDeployment(logger, rev, c.getNetworkConfig(), c.controllerConfig) + var replicaCount int32 = 1 + if rev.Spec.ServingState == v1alpha1.RevisionServingStateReserve { + replicaCount = 0 + } + deployment := MakeServingDeployment(logger, rev, c.getNetworkConfig(), c.controllerConfig, replicaCount) // Resolve tag image references to digests. if err := c.resolver.Resolve(deployment); err != nil { @@ -483,37 +479,37 @@ func (c *Controller) createDeployment(ctx context.Context, rev *v1alpha1.Revisio return nil, fmt.Errorf("Error resolving container to digest: %v", err) } - return c.KubeClientSet.AppsV1().Deployments(ns).Create(deployment) + return c.KubeClientSet.AppsV1().Deployments(deployment.Namespace).Create(deployment) } -// TODO(mattmoor): See the comment at the commented call site above. -// func (c *Controller) checkAndUpdateDeployment(ctx context.Context, rev *v1alpha1.Revision, deployment *appsv1.Deployment) (*appsv1.Deployment, Changed, error) { -// logger := logging.FromContext(ctx) - -// desiredDeployment := MakeServingDeployment(logger, rev, c.getNetworkConfig(), c.controllerConfig) - -// // Copy the userContainerImage digest and the replica count. -// // We don't want autoscaling differences or user updates to the image tag -// // to trigger redeployments. 
-// desiredDeployment.Spec.Replicas = deployment.Spec.Replicas -// pod := desiredDeployment.Spec.Template.Spec -// for i := range pod.Containers { -// if pod.Containers[i].Name == userContainerName { -// pod.Containers[i].Image = desiredDeployment.Spec.Template.Spec.Containers[i].Image -// } -// } - -// if equality.Semantic.DeepEqual(desiredDeployment.Spec, deployment.Spec) { -// return deployment, Unchanged, nil -// } -// logger.Infof("Reconciling deployment diff (-desired, +observed): %v", -// cmp.Diff(desiredDeployment.Spec, deployment.Spec, cmpopts.IgnoreUnexported(resource.Quantity{}))) -// deployment.Spec = desiredDeployment.Spec - -// d, err := c.KubeClientSet.AppsV1().Deployments(deployment.Namespace).Update(deployment) -// return d, WasChanged, err -// } +// This is a generic function used both for deployment of user code & autoscaler +func (c *Controller) checkAndUpdateDeployment(ctx context.Context, rev *v1alpha1.Revision, deployment *appsv1.Deployment) (*appsv1.Deployment, Changed, error) { + logger := logging.FromContext(ctx) + + // TODO(mattmoor): Generalize this to reconcile discrepancies vs. what + // MakeServingDeployment() would produce. 
+ desiredDeployment := deployment.DeepCopy() + if desiredDeployment.Spec.Replicas == nil { + var one int32 = 1 + desiredDeployment.Spec.Replicas = &one + } + if rev.Spec.ServingState == v1alpha1.RevisionServingStateActive && *desiredDeployment.Spec.Replicas == 0 { + *desiredDeployment.Spec.Replicas = 1 + } else if rev.Spec.ServingState == v1alpha1.RevisionServingStateReserve && *desiredDeployment.Spec.Replicas != 0 { + *desiredDeployment.Spec.Replicas = 0 + } + + if equality.Semantic.DeepEqual(desiredDeployment.Spec, deployment.Spec) { + return deployment, Unchanged, nil + } + logger.Infof("Reconciling deployment diff (-desired, +observed): %v", + cmp.Diff(desiredDeployment.Spec, deployment.Spec, cmpopts.IgnoreUnexported(resource.Quantity{}))) + deployment.Spec = desiredDeployment.Spec + d, err := c.KubeClientSet.AppsV1().Deployments(deployment.Namespace).Update(deployment) + return d, WasChanged, err +} +// This is a generic function used both for deployment of user code & autoscaler func (c *Controller) deleteDeployment(ctx context.Context, deployment *appsv1.Deployment) error { logger := logging.FromContext(ctx) @@ -528,9 +524,9 @@ func (c *Controller) deleteDeployment(ctx context.Context, deployment *appsv1.De } func (c *Controller) reconcileService(ctx context.Context, rev *v1alpha1.Revision) error { - logger := logging.FromContext(ctx) ns := controller.GetServingNamespaceName(rev.Namespace) serviceName := controller.GetServingK8SServiceNameForRevision(rev) + logger := logging.FromContext(ctx).With(zap.String(logkey.KubernetesService, serviceName)) rev.Status.ServiceName = serviceName @@ -731,9 +727,9 @@ func (c *Controller) reconcileAutoscalerService(ctx context.Context, rev *v1alph return nil } - logger := logging.FromContext(ctx) ns := pkg.GetServingSystemNamespace() serviceName := controller.GetRevisionAutoscalerName(rev) + logger := logging.FromContext(ctx).With(zap.String(logkey.KubernetesService, serviceName)) service, err := 
c.serviceLister.Services(ns).Get(serviceName) switch rev.Spec.ServingState { @@ -796,37 +792,43 @@ func (c *Controller) reconcileAutoscalerDeployment(ctx context.Context, rev *v1a return nil } - logger := logging.FromContext(ctx) ns := pkg.GetServingSystemNamespace() deploymentName := controller.GetRevisionAutoscalerName(rev) + logger := logging.FromContext(ctx).With(zap.String(logkey.Deployment, deploymentName)) - deployment, err := c.deploymentLister.Deployments(ns).Get(deploymentName) + deployment, getDepErr := c.deploymentLister.Deployments(ns).Get(deploymentName) switch rev.Spec.ServingState { - case v1alpha1.RevisionServingStateActive: - // When Active, the Autoscaler Deployment should exist and have a particular specification. - if apierrs.IsNotFound(err) { - // If it does not exist, then create it. + case v1alpha1.RevisionServingStateActive, v1alpha1.RevisionServingStateReserve: + // When Active or Reserved, Autoscaler deployment should exist and have a particular specification. + if apierrs.IsNotFound(getDepErr) { + // Deployment does not exist. Create it. + var err error deployment, err = c.createAutoscalerDeployment(ctx, rev) if err != nil { - logger.Errorf("Error creating Autoscaler Deployment %q: %v", deploymentName, err) + logger.Errorf("Error creating Autoscaler deployment %q: %v", deploymentName, err) return err } - logger.Infof("Created Autoscaler Deployment %q", deploymentName) - } else if err != nil { - logger.Errorf("Error reconciling Active Autoscaler Deployment %q: %v", deploymentName, err) - return err + logger.Infof("Created Autoscaler deployment %q", deploymentName) + } else if getDepErr != nil { + logger.Errorf("Error reconciling Autoscaler deployment %q: %v", deploymentName, getDepErr) + return getDepErr } else { - // TODO(mattmoor): Don't reconcile Deployments until we can avoid - // fighting with its defaulter. + // Deployment exist. 
Update the replica count based on the serving state if necessary + var err error + deployment, _, err = c.checkAndUpdateDeployment(ctx, rev, deployment) + if err != nil { + logger.Errorf("Error updating deployment %q: %v", deploymentName, err) + return err + } } // TODO(mattmoor): We don't predicate the Revision's readiness on any readiness // properties of the autoscaler, but perhaps we should. return nil - case v1alpha1.RevisionServingStateReserve, v1alpha1.RevisionServingStateRetired: + case v1alpha1.RevisionServingStateRetired: // When Reserve or Retired, we remove the underlying Autoscaler Deployment. - if apierrs.IsNotFound(err) { + if apierrs.IsNotFound(getDepErr) { // If it does not exist, then we have nothing to do. return nil } @@ -844,8 +846,11 @@ func (c *Controller) reconcileAutoscalerDeployment(ctx context.Context, rev *v1a } func (c *Controller) createAutoscalerDeployment(ctx context.Context, rev *v1alpha1.Revision) (*appsv1.Deployment, error) { - deployment := MakeServingAutoscalerDeployment(rev, c.controllerConfig.AutoscalerImage) - + var replicaCount int32 = 1 + if rev.Spec.ServingState == v1alpha1.RevisionServingStateReserve { + replicaCount = 0 + } + deployment := MakeServingAutoscalerDeployment(rev, c.controllerConfig.AutoscalerImage, replicaCount) return c.KubeClientSet.AppsV1().Deployments(deployment.Namespace).Create(deployment) } diff --git a/pkg/controller/revision/revision_test.go b/pkg/controller/revision/revision_test.go index 9aa4b721fe40..32e8eb9bca11 100644 --- a/pkg/controller/revision/revision_test.go +++ b/pkg/controller/revision/revision_test.go @@ -81,9 +81,8 @@ func getTestRevision() *v1alpha1.Revision { Name: "test-rev", Namespace: testNamespace, Labels: map[string]string{ - "testLabel1": "foo", - "testLabel2": "bar", - serving.RouteLabelKey: "test-route", + "testLabel1": "foo", + "testLabel2": "bar", }, Annotations: map[string]string{ "testAnnotation": "test", @@ -350,6 +349,12 @@ func addResourcesToInformers(t *testing.T, 
kubeInformer.Apps().V1().Deployments().Informer().GetIndexer().Add(deployment) } + // Add autoscaler deployment if any + autoscalerDeployment, err := kubeClient.AppsV1().Deployments(pkg.GetServingSystemNamespace()).Get(ctrl.GetRevisionAutoscalerName(rev), metav1.GetOptions{}) + if err == nil { + kubeInformer.Apps().V1().Deployments().Informer().GetIndexer().Add(autoscalerDeployment) + } + serviceName := ctrl.GetServingK8SServiceNameForRevision(rev) service, err := kubeClient.CoreV1().Services(ns).Get(serviceName, metav1.GetOptions{}) if apierrs.IsNotFound(err) && (haveBuild || inActive) { @@ -360,6 +365,12 @@ func addResourcesToInformers(t *testing.T, kubeInformer.Core().V1().Services().Informer().GetIndexer().Add(service) } + // Add autoscaler service if any + autoscalerService, err := kubeClient.CoreV1().Services(pkg.GetServingSystemNamespace()).Get(ctrl.GetRevisionAutoscalerName(rev), metav1.GetOptions{}) + if err == nil { + kubeInformer.Core().V1().Services().Informer().GetIndexer().Add(autoscalerService) + } + return rev, deployment, service } @@ -1576,7 +1587,7 @@ func TestActiveToRetiredRevisionDeletesStuff(t *testing.T) { } } -func TestActiveToReserveRevisionDeletesStuff(t *testing.T) { +func TestActiveToReserveRevisionDeactivateDeployment(t *testing.T) { kubeClient, _, servingClient, _, controller, kubeInformer, _, servingInformer, _, _ := newTestController(t) rev := getTestRevision() @@ -1589,11 +1600,30 @@ func TestActiveToReserveRevisionDeletesStuff(t *testing.T) { rev.Spec.ServingState = v1alpha1.RevisionServingStateReserve updateRevision(t, kubeClient, kubeInformer, servingClient, servingInformer, controller, rev) - // Expect the deployment to be gone. 
- expectedDeploymentName := fmt.Sprintf("%s-deployment", rev.Name) - deployment, err := kubeClient.AppsV1().Deployments(testNamespace).Get(expectedDeploymentName, metav1.GetOptions{}) - if err == nil { - t.Fatalf("Expected serving deployment to be missing but it was really here: %v", deployment) + // Expect the deployment to be there. + deploymentName := ctrl.GetRevisionDeploymentName(rev) + deployment, err := kubeClient.AppsV1().Deployments(testNamespace).Get(deploymentName, metav1.GetOptions{}) + if err != nil { + if apierrs.IsNotFound(err) { + t.Fatalf("Expected k8s deployment to be there but it was gone: %s/%s", testNamespace, deploymentName) + } + t.Fatalf("There was an error to get the deployment %s while it exists", deploymentName) + } + if *deployment.Spec.Replicas != 0 { + t.Fatalf("Expected k8s deployment to have %v replicas but got %v replicas.", 0, *deployment.Spec.Replicas) + } + + // Expect the autoscaler deployment to be there. + deploymentName = ctrl.GetRevisionAutoscalerName(rev) + deployment, err = kubeClient.AppsV1().Deployments(pkg.GetServingSystemNamespace()).Get(deploymentName, metav1.GetOptions{}) + if err != nil { + if apierrs.IsNotFound(err) { + t.Fatalf("Expected autoscaler k8s deployment to be there but it was gone: %s/%s", pkg.GetServingSystemNamespace(), deploymentName) + } + t.Fatalf("There was an error to get the autoscaler deployment %s while it exists", deploymentName) + } + if *deployment.Spec.Replicas != 0 { + t.Fatalf("Expected autoscaler k8s deployment to have %v replicas but got %v replicas.", 0, *deployment.Spec.Replicas) } } @@ -1628,27 +1658,39 @@ func TestReserveToActiveRevisionCreatesStuff(t *testing.T) { kubeClient, _, servingClient, _, controller, kubeInformer, _, servingInformer, _, _ := newTestController(t) rev := getTestRevision() - // Create revision. The k8s resources should not be created. + // Create revision. Two deployments should be created with 0 replicas. 
rev.Spec.ServingState = v1alpha1.RevisionServingStateReserve createRevision(t, kubeClient, kubeInformer, servingClient, servingInformer, controller, rev) - // Expect the deployment to be gone. - expectedDeploymentName := fmt.Sprintf("%s-deployment", rev.Name) - deployment, err := kubeClient.AppsV1().Deployments(testNamespace).Get(expectedDeploymentName, metav1.GetOptions{}) - if err == nil { - t.Fatalf("Expected serving deployment to be missing but it was really here: %v", deployment) + checkZeroReplicas := func(deploymentName string, ns string) { + deployment, err := kubeClient.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Expected deployment %v to be there with zero replicas but it is missing: %v", deploymentName, err) + } + if *deployment.Spec.Replicas != 0 { + t.Fatalf("Expected deployment %v to have %v replicas but got %v replicas.", deploymentName, 0, *deployment.Spec.Replicas) + } } + checkZeroReplicas(ctrl.GetRevisionDeploymentName(rev), testNamespace) + checkZeroReplicas(ctrl.GetRevisionAutoscalerName(rev), pkg.GetServingSystemNamespace()) + // Now, update the revision serving state to Active, and force another // run of the controller. rev.Spec.ServingState = v1alpha1.RevisionServingStateActive updateRevision(t, kubeClient, kubeInformer, servingClient, servingInformer, controller, rev) - // Expect the resources to be created. 
- _, err = kubeClient.AppsV1().Deployments(testNamespace).Get(expectedDeploymentName, metav1.GetOptions{}) - if err != nil { - t.Fatalf("Couldn't get serving deployment: %v", err) + checkNonZeroReplicas := func(deploymentName string, ns string) { + deployment, err := kubeClient.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Expected to have deployment %v with more than zero replicas but it is missing: %v", deploymentName, err) + } + if *deployment.Spec.Replicas == 0 { + t.Fatalf("Expected deployment %v to have more than 0 replicas but got %v replicas.", deploymentName, *deployment.Spec.Replicas) + } } + checkNonZeroReplicas(ctrl.GetRevisionDeploymentName(rev), testNamespace) + checkNonZeroReplicas(ctrl.GetRevisionAutoscalerName(rev), pkg.GetServingSystemNamespace()) } func TestNoAutoscalerImageCreatesNoAutoscalers(t *testing.T) { @@ -1725,6 +1767,85 @@ func TestIstioOutboundIPRangesInjection(t *testing.T) { } } +func TestReconcileReplicaCount(t *testing.T) { + kubeClient, _, elaClient, _, controller, kubeInformer, _, elaInformer, _, _ := newTestController(t) + rev := getTestRevision() + + rev.Spec.ServingState = v1alpha1.RevisionServingStateReserve + createRevision(t, kubeClient, kubeInformer, elaClient, elaInformer, controller, rev) + getDeployments := func() (*appsv1.Deployment, *appsv1.Deployment) { + d1, err := kubeClient.AppsV1().Deployments(testNamespace).Get(ctrl.GetRevisionDeploymentName(rev), metav1.GetOptions{}) + if err != nil { + t.Fatalf("Expected to have a deployment but found none: %v", err) + } + d2, err := kubeClient.AppsV1().Deployments(pkg.GetServingSystemNamespace()).Get(ctrl.GetRevisionAutoscalerName(rev), metav1.GetOptions{}) + if err != nil { + t.Fatalf("Expected to have an autoscaler deployment but found none: %v", err) + } + return d1, d2 + } + + d1, d2 := getDeployments() + + // Update the replica count to a positive number. This should get reconciled back to 0. 
+ d1.Spec.Replicas = new(int32) + *d1.Spec.Replicas = 10 + d2.Spec.Replicas = new(int32) + *d2.Spec.Replicas = 20 + kubeClient.AppsV1().Deployments(testNamespace).Update(d1) + kubeClient.AppsV1().Deployments(pkg.GetServingSystemNamespace()).Update(d2) + kubeInformer.Apps().V1().Deployments().Informer().GetIndexer().Update(d1) + kubeInformer.Apps().V1().Deployments().Informer().GetIndexer().Update(d2) + updateRevision(t, kubeClient, kubeInformer, elaClient, elaInformer, controller, rev) + d1, d2 = getDeployments() + if *d1.Spec.Replicas != 0 { + t.Fatalf("Expected deployment to have 0 replicas, got: %v", *d1.Spec.Replicas) + } + if *d2.Spec.Replicas != 0 { + t.Fatalf("Expected autoscaler deployment to have 0 replicas, got: %v", *d2.Spec.Replicas) + } + + // Activate the revision. Replicas should increase to 1 + rev.Spec.ServingState = v1alpha1.RevisionServingStateActive + updateRevision(t, kubeClient, kubeInformer, elaClient, elaInformer, controller, rev) + d1, d2 = getDeployments() + if *d1.Spec.Replicas != 1 { + t.Fatalf("Expected deployment to have 1 replicas, got: %v", *d1.Spec.Replicas) + } + if *d2.Spec.Replicas != 1 { + t.Fatalf("Expected autoscaler deployment to have 1 replicas, got: %v", *d2.Spec.Replicas) + } + + // Increase the replica count - those should be kept intact + d1.Spec.Replicas = new(int32) + *d1.Spec.Replicas = 30 + d2.Spec.Replicas = new(int32) + *d2.Spec.Replicas = 40 + kubeClient.AppsV1().Deployments(testNamespace).Update(d1) + kubeClient.AppsV1().Deployments(pkg.GetServingSystemNamespace()).Update(d2) + kubeInformer.Apps().V1().Deployments().Informer().GetIndexer().Update(d1) + kubeInformer.Apps().V1().Deployments().Informer().GetIndexer().Update(d2) + updateRevision(t, kubeClient, kubeInformer, elaClient, elaInformer, controller, rev) + d1, d2 = getDeployments() + if *d1.Spec.Replicas != 30 { + t.Fatalf("Expected deployment to have 30 replicas, got: %v", *d1.Spec.Replicas) + } + if *d2.Spec.Replicas != 40 { + t.Fatalf("Expected 
autoscaler deployment to have 40 replicas, got: %v", *d2.Spec.Replicas) + } + + // Deactivate the revision. Replicas should go back to 0. + rev.Spec.ServingState = v1alpha1.RevisionServingStateReserve + updateRevision(t, kubeClient, kubeInformer, elaClient, elaInformer, controller, rev) + d1, d2 = getDeployments() + if *d1.Spec.Replicas != 0 { + t.Fatalf("Expected deployment to have 0 replicas, got: %v", *d1.Spec.Replicas) + } + if *d2.Spec.Replicas != 0 { + t.Fatalf("Expected autoscaler deployment to have 0 replicas, got: %v", *d2.Spec.Replicas) + } +} + func getPodAnnotationsForConfig(t *testing.T, configMapValue string, configAnnotationOverride string) map[string]string { controllerConfig := getTestControllerConfig() kubeClient, _, servingClient, _, controller, kubeInformer, _, servingInformer, _, _ := newTestControllerWithConfig(t, &controllerConfig) diff --git a/pkg/controller/route/route.go b/pkg/controller/route/route.go index d8d1275bf3da..7c756ab9624f 100644 --- a/pkg/controller/route/route.go +++ b/pkg/controller/route/route.go @@ -272,13 +272,6 @@ func (c *Controller) syncTrafficTargets(ctx context.Context, route *v1alpha1.Rou return nil, err } - if err := c.deleteLabelForOutsideOfGivenRevisions(ctx, route, revMap); err != nil { - return nil, err - } - if err := c.setLabelForGivenRevisions(ctx, route, revMap); err != nil { - return nil, err - } - // Then create the actual route rules. 
logger.Info("Creating Istio route rules") revisionRoutes, err := c.createOrUpdateRouteRules(ctx, route, configMap, revMap) @@ -495,51 +488,6 @@ func (c *Controller) setLabelForGivenConfigurations( return nil } -func (c *Controller) setLabelForGivenRevisions( - ctx context.Context, route *v1alpha1.Route, revMap map[string]*v1alpha1.Revision) error { - logger := logging.FromContext(ctx) - revisionClient := c.ServingClientSet.ServingV1alpha1().Revisions(route.Namespace) - - // Validate revision if it already has a route label - for _, rev := range revMap { - if routeName, ok := rev.Labels[serving.RouteLabelKey]; ok { - if routeName != route.Name { - errMsg := fmt.Sprintf("Revision %q is already in use by %q, and cannot be used by %q", - rev.Name, routeName, route.Name) - c.Recorder.Event(route, corev1.EventTypeWarning, "RevisionInUse", errMsg) - logger.Error(errMsg) - return errors.New(errMsg) - } - } - } - - for _, rev := range revMap { - if rev.Labels != nil { - if _, ok := rev.Labels[serving.RouteLabelKey]; ok { - continue - } - } - // Fetch the latest version of the revision to label, to narrow the window for - // optimistic concurrency failures. 
- latestRev, err := revisionClient.Get(rev.Name, metav1.GetOptions{}) - if err != nil { - return err - } - if latestRev.Labels == nil { - latestRev.Labels = make(map[string]string) - } else if _, ok := latestRev.Labels[serving.RouteLabelKey]; ok { - continue - } - latestRev.Labels[serving.RouteLabelKey] = route.Name - if _, err := revisionClient.Update(latestRev); err != nil { - logger.Errorf("Failed to add route label to Revision %s: %s", rev.Name, err) - return err - } - } - - return nil -} - func (c *Controller) deleteLabelForOutsideOfGivenConfigurations( ctx context.Context, route *v1alpha1.Route, configMap map[string]*v1alpha1.Configuration) error { logger := logging.FromContext(ctx) @@ -570,36 +518,6 @@ func (c *Controller) deleteLabelForOutsideOfGivenConfigurations( return nil } -func (c *Controller) deleteLabelForOutsideOfGivenRevisions( - ctx context.Context, route *v1alpha1.Route, revMap map[string]*v1alpha1.Revision) error { - logger := logging.FromContext(ctx) - revClient := c.ServingClientSet.ServingV1alpha1().Revisions(route.Namespace) - - oldRevList, err := revClient.List( - metav1.ListOptions{ - LabelSelector: fmt.Sprintf("%s=%s", serving.RouteLabelKey, route.Name), - }, - ) - if err != nil { - logger.Errorf("Failed to fetch revisions with label '%s=%s': %s", - serving.RouteLabelKey, route.Name, err) - return err - } - - // Delete label for newly removed revisions as traffic target. - for _, rev := range oldRevList.Items { - if _, ok := revMap[rev.Name]; !ok { - delete(rev.Labels, serving.RouteLabelKey) - if _, err := revClient.Update(&rev); err != nil { - logger.Errorf("Failed to remove route label from Revision %s: %s", rev.Name, err) - return err - } - } - } - - return nil -} - // computeRevisionRoutes computes RevisionRoute for a route object. If there is one or more inactive revisions and enableScaleToZero // is true, a route rule with the activator service as the destination will be added. 
It returns the revision routes, the inactive // revision name to which the activator should forward requests to, and error if there is any. @@ -651,9 +569,9 @@ func (c *Controller) computeRevisionRoutes( cond := rev.Status.GetCondition(v1alpha1.RevisionConditionReady) if enableScaleToZero && cond != nil { // A revision is considered inactive (yet) if it's in - // "Inactive" condition or "Activating" condition. + // "Inactive" and "Updating" condition if (cond.Reason == "Inactive" && cond.Status == corev1.ConditionFalse) || - (cond.Reason == "Activating" && cond.Status == corev1.ConditionUnknown) { + (cond.Reason == "Updating" && cond.Status == corev1.ConditionUnknown) { // Let inactiveRev be the Reserve revision with the largest traffic weight. if tt.Percent > maxInactivePercent { maxInactivePercent = tt.Percent diff --git a/pkg/controller/route/route_test.go b/pkg/controller/route/route_test.go index 6f55e048b508..9fc40292b890 100644 --- a/pkg/controller/route/route_test.go +++ b/pkg/controller/route/route_test.go @@ -986,105 +986,6 @@ func TestSetLabelToConfigurationDirectlyConfigured(t *testing.T) { } } -func TestSetLabelToRevisionDirectlyConfigured(t *testing.T) { - _, servingClient, controller, _, servingInformer, _ := newTestController(t) - config := getTestConfiguration() - rev := getTestRevisionForConfig(config) - route := getTestRouteWithTrafficTargets( - []v1alpha1.TrafficTarget{{ - ConfigurationName: config.Name, - Percent: 100, - }}, - ) - - servingClient.ServingV1alpha1().Configurations(testNamespace).Create(config) - servingClient.ServingV1alpha1().Revisions(testNamespace).Create(rev) - servingClient.ServingV1alpha1().Routes(testNamespace).Create(route) - // Since updateRouteEvent looks in the lister, we need to add it to the informer - servingInformer.Serving().V1alpha1().Routes().Informer().GetIndexer().Add(route) - controller.updateRouteEvent(KeyOrDie(route)) - - rev, err := servingClient.ServingV1alpha1().Revisions(testNamespace).Get(rev.Name, 
metav1.GetOptions{}) - if err != nil { - t.Fatalf("error getting revision: %v", err) - } - - // Revision should not have route label as the revision is not marked as the Config's latest ready revision - expectedLabels := map[string]string{ - serving.ConfigurationLabelKey: config.Name, - } - - if diff := cmp.Diff(expectedLabels, rev.Labels); diff != "" { - t.Errorf("Unexpected label diff (-want +got): %v", diff) - } - - // Mark the revision as the Config's latest ready revision - config.Status.LatestReadyRevisionName = rev.Name - - servingClient.ServingV1alpha1().Configurations(testNamespace).Update(config) - controller.updateRouteEvent(KeyOrDie(route)) - - rev, err = servingClient.ServingV1alpha1().Revisions(testNamespace).Get(rev.Name, metav1.GetOptions{}) - if err != nil { - t.Fatalf("error getting revision: %v", err) - } - - // Revision should have the route label - expectedLabels = map[string]string{ - serving.ConfigurationLabelKey: config.Name, - serving.RouteLabelKey: route.Name, - } - - if diff := cmp.Diff(expectedLabels, rev.Labels); diff != "" { - t.Errorf("Unexpected label diff (-want +got): %v", diff) - } -} - -func TestSetLabelToConfigurationAndRevisionIndirectlyConfigured(t *testing.T) { - _, servingClient, controller, _, servingInformer, _ := newTestController(t) - config := getTestConfiguration() - rev := getTestRevisionForConfig(config) - route := getTestRouteWithTrafficTargets( - []v1alpha1.TrafficTarget{{ - RevisionName: rev.Name, - Percent: 100, - }}, - ) - - servingClient.ServingV1alpha1().Configurations(testNamespace).Create(config) - servingClient.ServingV1alpha1().Revisions(testNamespace).Create(rev) - servingClient.ServingV1alpha1().Routes(testNamespace).Create(route) - // Since updateRouteEvent looks in the lister, we need to add it to the informer - servingInformer.Serving().V1alpha1().Routes().Informer().GetIndexer().Add(route) - controller.updateRouteEvent(KeyOrDie(route)) - - config, err := 
servingClient.ServingV1alpha1().Configurations(testNamespace).Get(config.Name, metav1.GetOptions{}) - if err != nil { - t.Fatalf("error getting config: %v", err) - } - - // Configuration should be labeled for this route - expectedLabels := map[string]string{serving.RouteLabelKey: route.Name} - if diff := cmp.Diff(expectedLabels, config.Labels); diff != "" { - t.Errorf("Unexpected label in configuration diff (-want +got): %v", diff) - } - - rev, err = servingClient.ServingV1alpha1().Revisions(testNamespace).Get(rev.Name, metav1.GetOptions{}) - if err != nil { - t.Fatalf("error getting revision: %v", err) - } - - // Revision should have the route label - expectedLabels = map[string]string{ - serving.ConfigurationLabelKey: config.Name, - serving.RouteLabelKey: route.Name, - } - - if diff := cmp.Diff(expectedLabels, rev.Labels); diff != "" { - t.Errorf("Unexpected label in revision diff (-want +got): %v", diff) - } -} - func TestCreateRouteWithInvalidConfigurationShouldReturnError(t *testing.T) { _, servingClient, controller, _, servingInformer, _ := newTestController(t) config := getTestConfiguration() @@ -1221,7 +1122,7 @@ func TestCreateRouteConfigurationMissingCondition(t *testing.T) { } } -func TestSetLabelNotChangeConfigurationAndRevisionLabelIfLabelExists(t *testing.T) { +func TestSetLabelNotChangeConfigurationLabelIfLabelExists(t *testing.T) { _, servingClient, controller, _, servingInformer, _ := newTestController(t) config := getTestConfiguration() rev := getTestRevisionForConfig(config) @@ -1235,10 +1136,6 @@ func TestSetLabelNotChangeConfigurationAndRevisionLabelIfLabelExists(t *testing. // by function setLabelForGivenConfigurations. config.Labels = map[string]string{serving.RouteLabelKey: route.Name} - // Set revision's route label with route name to make sure revision's label will not be set - // by function setLabelForGivenRevisions. 
- rev.Labels[serving.RouteLabelKey] = route.Name - servingClient.ServingV1alpha1().Configurations(testNamespace).Create(config) servingClient.ServingV1alpha1().Revisions(testNamespace).Create(rev) servingClient.ServingV1alpha1().Routes(testNamespace).Create(route) @@ -1264,15 +1161,13 @@ func TestSetLabelNotChangeConfigurationAndRevisionLabelIfLabelExists(t *testing. controller.updateRouteEvent(route.Namespace + "/" + route.Name) } -func TestDeleteLabelOfConfigurationAndRevisionWhenUnconfigured(t *testing.T) { +func TestDeleteLabelOfConfigurationWhenUnconfigured(t *testing.T) { _, servingClient, controller, _, servingInformer, _ := newTestController(t) route := getTestRouteWithTrafficTargets([]v1alpha1.TrafficTarget{}) config := getTestConfiguration() // Set a route label in configuration which is expected to be deleted. config.Labels = map[string]string{serving.RouteLabelKey: route.Name} rev := getTestRevisionForConfig(config) - // Set a route label in revision which is expected to be deleted. - rev.Labels[serving.RouteLabelKey] = route.Name servingClient.ServingV1alpha1().Configurations(testNamespace).Create(config) servingClient.ServingV1alpha1().Revisions(testNamespace).Create(rev) diff --git a/pkg/logging/logkey/constants.go b/pkg/logging/logkey/constants.go index dae204722b4e..664d4ae5a3a6 100644 --- a/pkg/logging/logkey/constants.go +++ b/pkg/logging/logkey/constants.go @@ -61,4 +61,10 @@ const ( // Pod is the key used to represent a pod's name in logs Pod = "knative.dev/pod" + + // Deployment is the key used to represent a deployment's name in logs + Deployment = "knative.dev/deployment" + + // KubernetesService is the key used to represent a Kubernetes service name in logs + KubernetesService = "knative.dev/k8sservice" )