diff --git a/cmd/controller/main.go b/cmd/controller/main.go index 9dd123a56ad4..e804777b5849 100644 --- a/cmd/controller/main.go +++ b/cmd/controller/main.go @@ -36,10 +36,18 @@ import ( buildinformers "github.com/knative/build/pkg/client/informers/externalversions" clientset "github.com/knative/serving/pkg/client/clientset/versioned" informers "github.com/knative/serving/pkg/client/informers/externalversions" + "github.com/knative/serving/pkg/controller/build" "github.com/knative/serving/pkg/controller/configuration" + "github.com/knative/serving/pkg/controller/deployment" + "github.com/knative/serving/pkg/controller/endpoint" + "github.com/knative/serving/pkg/controller/ingress" "github.com/knative/serving/pkg/controller/revision" "github.com/knative/serving/pkg/controller/route" "github.com/knative/serving/pkg/controller/service" + cfgrcv "github.com/knative/serving/pkg/receiver/configuration" + revrcv "github.com/knative/serving/pkg/receiver/revision" + rtrcv "github.com/knative/serving/pkg/receiver/route" + svcrcv "github.com/knative/serving/pkg/receiver/service" "github.com/knative/serving/pkg/signals" "go.opencensus.io/exporter/prometheus" "go.opencensus.io/stats/view" @@ -134,7 +142,7 @@ func main() { logger.Fatalf("Error loading controller config: %v", err) } - revControllerConfig := revision.ControllerConfig{ + revControllerConfig := revrcv.ControllerConfig{ AutoscaleConcurrencyQuantumOfTime: autoscaleConcurrencyQuantumOfTime, AutoscaleEnableSingleConcurrency: autoscaleEnableSingleConcurrency, AutoscalerImage: autoscalerImage, @@ -149,15 +157,51 @@ func main() { QueueProxyLoggingLevel: queueProxyLoggingLevel.Get(), } - // Build all of our controllers, with the clients constructed above. - // Add new controllers to this array. 
- controllers := []controller.Interface{ - configuration.NewController(kubeClient, elaClient, buildClient, kubeInformerFactory, elaInformerFactory, cfg, *controllerConfig, logger), - revision.NewController(kubeClient, elaClient, kubeInformerFactory, elaInformerFactory, buildInformerFactory, cfg, &revControllerConfig, logger), - route.NewController(kubeClient, elaClient, kubeInformerFactory, elaInformerFactory, cfg, *controllerConfig, autoscaleEnableScaleToZero, logger), - service.NewController(kubeClient, elaClient, kubeInformerFactory, elaInformerFactory, cfg, *controllerConfig, logger), + // The receivers are what implement our core logic. Each of these subscribe to some subset of the resources for which we + // have controllers below. + receivers := []interface{}{ + revrcv.New(kubeClient, elaClient, kubeInformerFactory, elaInformerFactory, cfg, &revControllerConfig, logger), + rtrcv.New(kubeClient, elaClient, kubeInformerFactory, elaInformerFactory, cfg, *controllerConfig, autoscaleEnableScaleToZero, logger), + cfgrcv.New(kubeClient, elaClient, buildClient, kubeInformerFactory, elaInformerFactory, cfg, *controllerConfig, logger), + svcrcv.New(kubeClient, elaClient, kubeInformerFactory, elaInformerFactory, cfg, *controllerConfig, logger), } + // Construct a collection of thin workqueue controllers. Each of these looks for entries in "receivers" that implements foo.Receiver, + // and on reconciliation events forwards the work to those receivers. + rtc, err := route.NewController(kubeClient, elaClient, kubeInformerFactory, elaInformerFactory, cfg, *controllerConfig, logger, receivers...) + if err != nil { + logger.Fatalf("Error creating Route controller: %v", err) + } + svc, err := service.NewController(kubeClient, elaClient, kubeInformerFactory, elaInformerFactory, cfg, *controllerConfig, logger, receivers...) 
+ if err != nil { + logger.Fatalf("Error creating Service controller: %v", err) + } + config, err := configuration.NewController(kubeClient, elaClient, kubeInformerFactory, elaInformerFactory, cfg, *controllerConfig, logger, receivers...) + if err != nil { + logger.Fatalf("Error creating Configuration controller: %v", err) + } + rc, err := revision.NewController(kubeClient, elaClient, kubeInformerFactory, elaInformerFactory, cfg, *controllerConfig, logger, receivers...) + if err != nil { + logger.Fatalf("Error creating Revision controller: %v", err) + } + dc, err := deployment.NewController(kubeClient, elaClient, kubeInformerFactory, elaInformerFactory, cfg, *controllerConfig, logger, receivers...) + if err != nil { + logger.Fatalf("Error creating Deployment controller: %v", err) + } + ec, err := endpoint.NewController(kubeClient, elaClient, kubeInformerFactory, elaInformerFactory, cfg, *controllerConfig, logger, receivers...) + if err != nil { + logger.Fatalf("Error creating Endpoints controller: %v", err) + } + bc, err := build.NewController(kubeClient, elaClient, kubeInformerFactory, elaInformerFactory, buildInformerFactory, cfg, *controllerConfig, logger, receivers...) + if err != nil { + logger.Fatalf("Error creating Build controller: %v", err) + } + ic, err := ingress.NewController(kubeClient, elaClient, kubeInformerFactory, elaInformerFactory, cfg, *controllerConfig, logger, receivers...) + if err != nil { + logger.Fatalf("Error creating Ingress controller: %v", err) + } + controllers := []controller.Interface{rtc, svc, config, rc, dc, ec, bc, ic} + go kubeInformerFactory.Start(stopCh) go elaInformerFactory.Start(stopCh) go buildInformerFactory.Start(stopCh) diff --git a/pkg/controller/build/build.go b/pkg/controller/build/build.go new file mode 100644 index 000000000000..af060c4c8fc0 --- /dev/null +++ b/pkg/controller/build/build.go @@ -0,0 +1,131 @@ +/* +Copyright 2018 Google LLC. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package build + +import ( + "fmt" + + buildv1alpha1 "github.com/knative/build/pkg/apis/build/v1alpha1" + buildinformers "github.com/knative/build/pkg/client/informers/externalversions" + listers "github.com/knative/build/pkg/client/listers/build/v1alpha1" + clientset "github.com/knative/serving/pkg/client/clientset/versioned" + informers "github.com/knative/serving/pkg/client/informers/externalversions" + "github.com/knative/serving/pkg/controller" + "go.uber.org/zap" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/util/runtime" + kubeinformers "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/cache" +) + +const controllerAgentName = "build-controller" + +// Receiver defines the interface that a receiver must implement to receive events +// from this Controller. 
+type Receiver interface { + SyncBuild(*buildv1alpha1.Build) error +} + +// Controller implements the controller for Build resources +type Controller struct { + *controller.Base + + // lister indexes properties about Build + lister listers.BuildLister + synced cache.InformerSynced + + receivers []Receiver +} + +// NewController creates a new Build controller +func NewController( + kubeClientSet kubernetes.Interface, + elaClientSet clientset.Interface, + kubeInformerFactory kubeinformers.SharedInformerFactory, + elaInformerFactory informers.SharedInformerFactory, + buildInformerFactory buildinformers.SharedInformerFactory, + config *rest.Config, + controllerConfig controller.Config, + logger *zap.SugaredLogger, + receivers ...interface{}) (controller.Interface, error) { + + // obtain references to a shared index informer for the Build type. + informer := buildInformerFactory.Build().V1alpha1().Builds() + + controller := &Controller{ + Base: controller.NewBase(kubeClientSet, elaClientSet, kubeInformerFactory, + elaInformerFactory, informer.Informer(), controllerAgentName, "Builds", logger), + lister: informer.Lister(), + synced: informer.Informer().HasSynced, + } + + for _, rcv := range receivers { + if dr, ok := rcv.(Receiver); ok { + controller.receivers = append(controller.receivers, dr) + } + } + if len(controller.receivers) == 0 { + return nil, fmt.Errorf("None of the provided receivers implement build.Receiver. " + + "If the Build controller is no longer needed it should be removed.") + } + return controller, nil +} + +// Run will set up the event handlers for types we are interested in, as well +// as syncing informer caches and starting workers. It will block until stopCh +// is closed, at which point it will shutdown the workqueue and wait for +// workers to finish processing their current work items. 
+func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error { + return c.RunController(threadiness, stopCh, + []cache.InformerSynced{c.synced}, c.syncHandler, "Build") +} + +// syncHandler compares the actual state with the desired, and attempts to +// converge the two. It then updates the Status block of the Build +// resource with the current status of the resource. +func (c *Controller) syncHandler(key string) error { + // Convert the namespace/name string into a distinct namespace and name + namespace, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + runtime.HandleError(fmt.Errorf("invalid resource key: %s", key)) + return nil + } + + // Get the Build resource with this namespace/name + original, err := c.lister.Builds(namespace).Get(name) + if err != nil { + // The resource may no longer exist, in which case we stop + // processing. + if errors.IsNotFound(err) { + runtime.HandleError(fmt.Errorf("build %q in work queue no longer exists", key)) + return nil + } + + return err + } + + for _, dr := range c.receivers { + // Don't modify the informer's copy, and give each receiver a fresh copy. + cp := original.DeepCopy() + if err := dr.SyncBuild(cp); err != nil { + return err + } + } + return nil +} diff --git a/pkg/controller/configuration/configuration.go b/pkg/controller/configuration/configuration.go index b4bbee4e7363..4d6f45caf16e 100644 --- a/pkg/controller/configuration/configuration.go +++ b/pkg/controller/configuration/configuration.go @@ -17,23 +17,15 @@ limitations under the License. 
package configuration import ( - "context" "fmt" - buildv1alpha1 "github.com/knative/build/pkg/apis/build/v1alpha1" - buildclientset "github.com/knative/build/pkg/client/clientset/versioned" - "github.com/knative/serving/pkg/apis/serving" "github.com/knative/serving/pkg/apis/serving/v1alpha1" clientset "github.com/knative/serving/pkg/client/clientset/versioned" informers "github.com/knative/serving/pkg/client/informers/externalversions" listers "github.com/knative/serving/pkg/client/listers/serving/v1alpha1" "github.com/knative/serving/pkg/controller" - "github.com/knative/serving/pkg/logging" - "github.com/knative/serving/pkg/logging/logkey" "go.uber.org/zap" - corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/runtime" kubeinformers "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" @@ -43,51 +35,54 @@ import ( const controllerAgentName = "configuration-controller" +// Receiver defines the interface that a receiver must implement to receive events +// from this Controller. 
+type Receiver interface { + SyncConfiguration(*v1alpha1.Configuration) error +} + // Controller implements the controller for Configuration resources type Controller struct { *controller.Base - buildClientSet buildclientset.Interface - // lister indexes properties about Configuration lister listers.ConfigurationLister synced cache.InformerSynced - // don't start the workers until revisions cache have been synced - revisionsSynced cache.InformerSynced + receivers []Receiver } // NewController creates a new Configuration controller func NewController( kubeClientSet kubernetes.Interface, elaClientSet clientset.Interface, - buildClientSet buildclientset.Interface, kubeInformerFactory kubeinformers.SharedInformerFactory, elaInformerFactory informers.SharedInformerFactory, config *rest.Config, controllerConfig controller.Config, - logger *zap.SugaredLogger) controller.Interface { + logger *zap.SugaredLogger, + receivers ...interface{}) (controller.Interface, error) { - // obtain references to a shared index informer for the Configuration - // and Revision type. + // obtain references to a shared index informer for the Configuration type. 
informer := elaInformerFactory.Serving().V1alpha1().Configurations() - revisionInformer := elaInformerFactory.Serving().V1alpha1().Revisions() controller := &Controller{ Base: controller.NewBase(kubeClientSet, elaClientSet, kubeInformerFactory, elaInformerFactory, informer.Informer(), controllerAgentName, "Configurations", logger), - buildClientSet: buildClientSet, - lister: informer.Lister(), - synced: informer.Informer().HasSynced, - revisionsSynced: revisionInformer.Informer().HasSynced, + lister: informer.Lister(), + synced: informer.Informer().HasSynced, } - controller.Logger.Info("Setting up event handlers") - revisionInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: controller.addRevisionEvent, - UpdateFunc: controller.updateRevisionEvent, - }) - return controller + for _, rcv := range receivers { + if dr, ok := rcv.(Receiver); ok { + controller.receivers = append(controller.receivers, dr) + } + } + if len(controller.receivers) == 0 { + return nil, fmt.Errorf("None of the provided receivers implement configuration.Receiver. " + + "If the Configuration controller is no longer needed it should be removed.") + } + return controller, nil } // Run will set up the event handlers for types we are interested in, as well @@ -96,12 +91,7 @@ func NewController( // workers to finish processing their current work items. func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error { return c.RunController(threadiness, stopCh, - []cache.InformerSynced{c.synced, c.revisionsSynced}, c.syncHandler, "Configuration") -} - -// loggerWithConfigInfo enriches the logs with configuration name and namespace. 
-func loggerWithConfigInfo(logger *zap.SugaredLogger, ns string, name string) *zap.SugaredLogger { - return logger.With(zap.String(logkey.Namespace, ns), zap.String(logkey.Configuration, name)) + []cache.InformerSynced{c.synced}, c.syncHandler, "Configuration") } // syncHandler compares the actual state with the desired, and attempts to @@ -115,10 +105,8 @@ func (c *Controller) syncHandler(key string) error { return nil } - logger := loggerWithConfigInfo(c.Logger, namespace, name) - // Get the Configuration resource with this namespace/name - config, err := c.lister.Configurations(namespace).Get(name) + original, err := c.lister.Configurations(namespace).Get(name) if err != nil { // The resource may no longer exist, in which case we stop // processing. @@ -129,242 +117,13 @@ func (c *Controller) syncHandler(key string) error { return err } - // Don't modify the informer's copy. - config = config.DeepCopy() - - // Configuration business logic - if config.GetGeneration() == config.Status.ObservedGeneration { - // TODO(vaikas): Check to see if Status.LatestCreatedRevisionName is ready and update Status.LatestReady - logger.Infof("Skipping reconcile since already reconciled %d == %d", - config.Spec.Generation, config.Status.ObservedGeneration) - return nil - } - - logger.Infof("Running reconcile Configuration for %s\n%+v\n%+v\n", - config.Name, config, config.Spec.RevisionTemplate) - spec := config.Spec.RevisionTemplate.Spec - controllerRef := controller.NewConfigurationControllerRef(config) - - if config.Spec.Build != nil { - // TODO(mattmoor): Determine whether we reuse the previous build. 
- build := &buildv1alpha1.Build{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: config.Namespace, - GenerateName: fmt.Sprintf("%s-", config.Name), - }, - Spec: *config.Spec.Build, - } - build.OwnerReferences = append(build.OwnerReferences, *controllerRef) - created, err := c.buildClientSet.BuildV1alpha1().Builds(build.Namespace).Create(build) - if err != nil { - logger.Errorf("Failed to create Build:\n%+v\n%s", build, err) - c.Recorder.Eventf(config, corev1.EventTypeWarning, "CreationFailed", "Failed to create Build %q: %v", build.Name, err) - return err - } - logger.Infof("Created Build:\n%+v", created.Name) - c.Recorder.Eventf(config, corev1.EventTypeNormal, "Created", "Created Build %q", created.Name) - spec.BuildName = created.Name - } - - revName, err := generateRevisionName(config) - if err != nil { - return err - } - - revClient := c.ElaClientSet.ServingV1alpha1().Revisions(config.Namespace) - created, err := revClient.Get(revName, metav1.GetOptions{}) - if err != nil { - if !errors.IsNotFound(err) { - logger.Error("Revisions Get failed", zap.Error(err), zap.String(logkey.Revision, revName)) - return err - } - - rev := &v1alpha1.Revision{ - ObjectMeta: config.Spec.RevisionTemplate.ObjectMeta, - Spec: spec, - } - // TODO: Should this just use rev.ObjectMeta.GenerateName = - rev.ObjectMeta.Name = revName - // Can't generate objects in a different namespace from what the call is made against, - // so use the namespace of the configuration that's being updated for the Revision being - // created. 
- rev.ObjectMeta.Namespace = config.Namespace - - if rev.ObjectMeta.Labels == nil { - rev.ObjectMeta.Labels = make(map[string]string) - } - rev.ObjectMeta.Labels[serving.ConfigurationLabelKey] = config.Name - if rev.ObjectMeta.Annotations == nil { - rev.ObjectMeta.Annotations = make(map[string]string) - } - rev.ObjectMeta.Annotations[serving.ConfigurationGenerationAnnotationKey] = fmt.Sprintf("%v", config.Spec.Generation) - - // Delete revisions when the parent Configuration is deleted. - rev.OwnerReferences = append(rev.OwnerReferences, *controllerRef) - - created, err = revClient.Create(rev) - if err != nil { - logger.Errorf("Failed to create Revision:\n%+v\n%s", rev, err) - c.Recorder.Eventf(config, corev1.EventTypeWarning, "CreationFailed", "Failed to create Revision %q: %v", rev.Name, err) + for _, dr := range c.receivers { + // Don't modify the informer's copy, and give each receiver a fresh copy. + cp := original.DeepCopy() + if err := dr.SyncConfiguration(cp); err != nil { return err } - c.Recorder.Eventf(config, corev1.EventTypeNormal, "Created", "Created Revision %q", rev.Name) - logger.Infof("Created Revision:\n%+v", created) - } else { - logger.Infof("Revision already created %s: %s", created.ObjectMeta.Name, err) - } - // Update the Status of the configuration with the latest generation that - // we just reconciled against so we don't keep generating revisions. - // Also update the LatestCreatedRevisionName so that we'll know revision to check - // for ready state so that when ready, we can make it Latest. 
- config.Status.LatestCreatedRevisionName = created.ObjectMeta.Name - config.Status.ObservedGeneration = config.Spec.Generation - - logger.Infof("Updating the configuration status:\n%+v", config) - - if _, err = c.updateStatus(config); err != nil { - logger.Error("Failed to update the configuration", zap.Error(err)) - return err - } - - return nil -} - -func generateRevisionName(u *v1alpha1.Configuration) (string, error) { - // TODO: consider making sure the length of the - // string will not cause problems down the stack - if u.Spec.Generation == 0 { - return "", fmt.Errorf("configuration generation cannot be 0") - } - return fmt.Sprintf("%s-%05d", u.Name, u.Spec.Generation), nil -} - -func (c *Controller) updateStatus(u *v1alpha1.Configuration) (*v1alpha1.Configuration, error) { - configClient := c.ElaClientSet.ServingV1alpha1().Configurations(u.Namespace) - newu, err := configClient.Get(u.Name, metav1.GetOptions{}) - if err != nil { - return nil, err - } - newu.Status = u.Status - - // TODO: for CRD there's no updatestatus, so use normal update - return configClient.Update(newu) - // return configClient.UpdateStatus(newu) -} - -func (c *Controller) addRevisionEvent(obj interface{}) { - revision := obj.(*v1alpha1.Revision) - revisionName := revision.Name - namespace := revision.Namespace - // Lookup and see if this Revision corresponds to a Configuration that - // we own and hence the Configuration that created this Revision. 
- configName := controller.LookupOwningConfigurationName(revision.OwnerReferences) - if configName == "" { - return - } - - logger := loggerWithConfigInfo(c.Logger, namespace, configName).With(zap.String(logkey.Revision, revisionName)) - ctx := logging.WithLogger(context.TODO(), logger) - - config, err := c.lister.Configurations(namespace).Get(configName) - if err != nil { - logger.Error("Error fetching configuration upon revision becoming ready", zap.Error(err)) - return - } - - if revision.Name != config.Status.LatestCreatedRevisionName { - // The revision isn't the latest created one, so ignore this event. - logger.Info("Revision is not the latest created one") - return - } - - // Don't modify the informer's copy. - config = config.DeepCopy() - - if !revision.Status.IsReady() { - logger.Infof("Revision %q of configuration %q is not ready", revisionName, config.Name) - - //add LatestRevision condition to be false with the status from the revision - c.markConfigurationLatestRevisionStatus(ctx, config, revision) - - if _, err := c.updateStatus(config); err != nil { - logger.Error("Error updating configuration", zap.Error(err)) - } - c.Recorder.Eventf(config, corev1.EventTypeNormal, "LatestRevisionUpdate", - "Latest revision of configuration is not ready") - - } else { - logger.Info("Revision is ready") - - alreadyReady := config.Status.IsReady() - if !alreadyReady { - c.markConfigurationReady(ctx, config, revision) - } - logger.Infof("Setting LatestReadyRevisionName of Configuration %q to revision %q", - config.Name, revision.Name) - config.Status.LatestReadyRevisionName = revision.Name - - if _, err := c.updateStatus(config); err != nil { - logger.Error("Error updating configuration", zap.Error(err)) - } - if !alreadyReady { - c.Recorder.Eventf(config, corev1.EventTypeNormal, "ConfigurationReady", - "Configuration becomes ready") - } - c.Recorder.Eventf(config, corev1.EventTypeNormal, "LatestReadyUpdate", - "LatestReadyRevisionName updated to %q", revision.Name) - 
- } - return -} - -func (c *Controller) updateRevisionEvent(old, new interface{}) { - c.addRevisionEvent(new) -} - -func getLatestRevisionStatusCondition(revision *v1alpha1.Revision) *v1alpha1.RevisionCondition { - for _, cond := range revision.Status.Conditions { - if !(cond.Type == v1alpha1.RevisionConditionReady && cond.Status == corev1.ConditionTrue) { - return &cond - } } return nil } - -// Mark ConfigurationConditionReady of Configuration ready as the given latest -// created revision is ready. -func (c *Controller) markConfigurationReady( - ctx context.Context, - config *v1alpha1.Configuration, revision *v1alpha1.Revision) { - logger := logging.FromContext(ctx) - logger.Info("Marking Configuration ready") - config.Status.RemoveCondition(v1alpha1.ConfigurationConditionLatestRevisionReady) - config.Status.SetCondition( - &v1alpha1.ConfigurationCondition{ - Type: v1alpha1.ConfigurationConditionReady, - Status: corev1.ConditionTrue, - Reason: "LatestRevisionReady", - }) -} - -// Mark ConfigurationConditionLatestRevisionReady of Configuration to false with the status -// from the revision -func (c *Controller) markConfigurationLatestRevisionStatus( - ctx context.Context, - config *v1alpha1.Configuration, revision *v1alpha1.Revision) { - logger := logging.FromContext(ctx) - config.Status.RemoveCondition(v1alpha1.ConfigurationConditionReady) - cond := getLatestRevisionStatusCondition(revision) - if cond == nil { - logger.Info("Revision status is not updated yet") - return - } - config.Status.SetCondition( - &v1alpha1.ConfigurationCondition{ - Type: v1alpha1.ConfigurationConditionLatestRevisionReady, - Status: corev1.ConditionFalse, - Reason: cond.Reason, - Message: cond.Message, - }) -} diff --git a/pkg/controller/configuration/queueing_test.go b/pkg/controller/configuration/queueing_test.go index 28fd612e4f4e..e4d7368ef449 100644 --- a/pkg/controller/configuration/queueing_test.go +++ b/pkg/controller/configuration/queueing_test.go @@ -20,13 +20,25 @@ import ( 
"testing" "time" + fakeclientset "github.com/knative/serving/pkg/client/clientset/versioned/fake" + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + kubeinformers "k8s.io/client-go/informers" + fakekubeclientset "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/rest" "github.com/knative/serving/pkg/apis/serving/v1alpha1" - + informers "github.com/knative/serving/pkg/client/informers/externalversions" + ctrl "github.com/knative/serving/pkg/controller" hooks "github.com/knative/serving/pkg/controller/testing" ) +const ( + testNamespace string = "test" +) + /* TODO tests: - syncHandler returns error (in processNextWorkItem) - invalid key in workqueue (in processNextWorkItem) @@ -34,6 +46,121 @@ import ( - invalid key given to syncHandler - resource doesn't exist in lister (from syncHandler) */ +func getTestConfiguration() *v1alpha1.Configuration { + return &v1alpha1.Configuration{ + ObjectMeta: metav1.ObjectMeta{ + SelfLink: "/apis/serving/v1alpha1/namespaces/test/configurations/test-config", + Name: "test-config", + Namespace: testNamespace, + }, + Spec: v1alpha1.ConfigurationSpec{ + //TODO(grantr): This is a workaround for generation initialization + Generation: 1, + RevisionTemplate: v1alpha1.RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "test-label": "test", + "example.com/namespaced-label": "test", + }, + Annotations: map[string]string{ + "test-annotation-1": "foo", + "test-annotation-2": "bar", + }, + }, + Spec: v1alpha1.RevisionSpec{ + ServiceAccountName: "test-account", + // corev1.Container has a lot of setting. We try to pass many + // of them here to verify that we pass through the settings to + // the derived Revisions. 
+ Container: corev1.Container{ + Image: "gcr.io/repo/image", + Command: []string{"echo"}, + Args: []string{"hello", "world"}, + WorkingDir: "/tmp", + Env: []corev1.EnvVar{{ + Name: "EDITOR", + Value: "emacs", + }}, + LivenessProbe: &corev1.Probe{ + TimeoutSeconds: 42, + }, + ReadinessProbe: &corev1.Probe{ + TimeoutSeconds: 43, + }, + TerminationMessagePath: "/dev/null", + }, + }, + }, + }, + } +} + +type receiver struct { + elaClient *fakeclientset.Clientset +} + +func (r *receiver) SyncConfiguration(cfg *v1alpha1.Configuration) error { + _, err := r.elaClient.Serving().Revisions(cfg.Namespace).Create(&v1alpha1.Revision{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: cfg.Namespace, + Name: cfg.Name, + }, + }) + return err +} + +var _ Receiver = (*receiver)(nil) + +func newRunningTestController(t *testing.T, elaObjects ...runtime.Object) ( + kubeClient *fakekubeclientset.Clientset, + elaClient *fakeclientset.Clientset, + controller *Controller, + kubeInformer kubeinformers.SharedInformerFactory, + elaInformer informers.SharedInformerFactory, + stopCh chan struct{}) { + + // Create fake clients + kubeClient = fakekubeclientset.NewSimpleClientset() + // The ability to insert objects here is intended to work around the problem + // with watches not firing in client-go 1.9. When we update to client-go 1.10 + // this can probably be removed. + elaClient = fakeclientset.NewSimpleClientset(elaObjects...) + + // Create informer factories with fake clients. The second parameter sets the + // resync period to zero, disabling it. + kubeInformer = kubeinformers.NewSharedInformerFactory(kubeClient, 0) + elaInformer = informers.NewSharedInformerFactory(elaClient, 0) + + c, err := NewController( + kubeClient, + elaClient, + kubeInformer, + elaInformer, + &rest.Config{}, + ctrl.Config{}, + zap.NewNop().Sugar(), + &receiver{elaClient}, + ) + if err != nil { + t.Fatalf("error creating controller: %v", err) + } + controller = c.(*Controller) + + // Start the informers. 
This must happen after the call to NewController, + // otherwise there are no informers to be started. + stopCh = make(chan struct{}) + kubeInformer.Start(stopCh) + elaInformer.Start(stopCh) + + // Run the controller. + go func() { + if err := controller.Run(2, stopCh); err != nil { + t.Fatalf("Error running controller: %v", err) + } + }() + + return +} func TestNewConfigurationCallsSyncHandler(t *testing.T) { config := getTestConfiguration() diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go index 6e722789259e..cac24d7788f3 100644 --- a/pkg/controller/controller.go +++ b/pkg/controller/controller.go @@ -23,17 +23,14 @@ import ( clientset "github.com/knative/serving/pkg/client/clientset/versioned" elascheme "github.com/knative/serving/pkg/client/clientset/versioned/scheme" informers "github.com/knative/serving/pkg/client/informers/externalversions" - "github.com/knative/serving/pkg/logging/logkey" + "github.com/knative/serving/pkg/receiver" "go.uber.org/zap" - corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" kubeinformers "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" - typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/client-go/tools/cache" - "k8s.io/client-go/tools/record" "k8s.io/client-go/util/workqueue" ) @@ -51,23 +48,7 @@ func init() { // Base implements most of the boilerplate and common code // we have in our controllers. 
type Base struct { - // KubeClientSet allows us to talk to the k8s for core APIs - KubeClientSet kubernetes.Interface - - // ElaClientSet allows us to configure Ela objects - ElaClientSet clientset.Interface - - // KubeInformerFactory provides shared informers for resources - // in all known API group versions - KubeInformerFactory kubeinformers.SharedInformerFactory - - // ElaInformerFactory provides shared informers for resources - // in all known API group versions - ElaInformerFactory informers.SharedInformerFactory - - // Recorder is an event recorder for recording Event resources to the - // Kubernetes API. - Recorder record.EventRecorder + *receiver.Base // WorkQueue is a rate limited work queue. This is used to queue work to be // processed instead of performing it as soon as a change happens. This @@ -75,13 +56,6 @@ type Base struct { // time, and makes it easy to ensure we are never processing the same item // simultaneously in two different workers. WorkQueue workqueue.RateLimitingInterface - - // Sugared logger is easier to use but is not as performant as the - // raw logger. In performance critical paths, call logger.Desugar() - // and use the returned raw logger instead. In addition to the - // performance benefits, raw logger also preserves type-safety at - // the expense of slightly greater verbosity. 
- Logger *zap.SugaredLogger } // NewBase instantiates a new instance of Base implementing @@ -95,25 +69,10 @@ func NewBase( controllerAgentName string, workQueueName string, logger *zap.SugaredLogger) *Base { - - // Enrich the logs with controller name - logger = logger.Named(controllerAgentName).With(zap.String(logkey.ControllerType, controllerAgentName)) - - // Create event broadcaster - logger.Debug("Creating event broadcaster") - eventBroadcaster := record.NewBroadcaster() - eventBroadcaster.StartLogging(logger.Named("event-broadcaster").Infof) - eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: kubeClientSet.CoreV1().Events("")}) - recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerAgentName}) - base := &Base{ - KubeClientSet: kubeClientSet, - ElaClientSet: elaClientSet, - KubeInformerFactory: kubeInformerFactory, - ElaInformerFactory: elaInformerFactory, - Recorder: recorder, - WorkQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), workQueueName), - Logger: logger, + Base: receiver.NewBase(kubeClientSet, elaClientSet, kubeInformerFactory, elaInformerFactory, + informer, controllerAgentName, logger), + WorkQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), workQueueName), } // Set up an event handler for when the resource types of interest change diff --git a/pkg/controller/deployment/deployment.go b/pkg/controller/deployment/deployment.go new file mode 100644 index 000000000000..82e9f9c8d20d --- /dev/null +++ b/pkg/controller/deployment/deployment.go @@ -0,0 +1,129 @@ +/* +Copyright 2018 Google LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package deployment + +import ( + "fmt" + + clientset "github.com/knative/serving/pkg/client/clientset/versioned" + informers "github.com/knative/serving/pkg/client/informers/externalversions" + "github.com/knative/serving/pkg/controller" + "go.uber.org/zap" + appsv1 "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/util/runtime" + kubeinformers "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + listers "k8s.io/client-go/listers/apps/v1" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/cache" +) + +const controllerAgentName = "deployment-controller" + +// Receiver defines the interface that a receiver must implement to receive events +// from this Controller. +type Receiver interface { + SyncDeployment(*appsv1.Deployment) error +} + +// Controller implements the controller for Deployment resources +type Controller struct { + *controller.Base + + // lister indexes properties about Deployment + lister listers.DeploymentLister + synced cache.InformerSynced + + receivers []Receiver +} + +// NewController creates a new Deployment controller +func NewController( + kubeClientSet kubernetes.Interface, + elaClientSet clientset.Interface, + kubeInformerFactory kubeinformers.SharedInformerFactory, + elaInformerFactory informers.SharedInformerFactory, + config *rest.Config, + controllerConfig controller.Config, + logger *zap.SugaredLogger, + receivers ...interface{}) (controller.Interface, error) { + + // obtain references to a shared index informer for the Deployment type. 
+ informer := kubeInformerFactory.Apps().V1().Deployments() + + controller := &Controller{ + Base: controller.NewBase(kubeClientSet, elaClientSet, kubeInformerFactory, + elaInformerFactory, informer.Informer(), controllerAgentName, "Deployments", logger), + lister: informer.Lister(), + synced: informer.Informer().HasSynced, + } + + for _, rcv := range receivers { + if dr, ok := rcv.(Receiver); ok { + controller.receivers = append(controller.receivers, dr) + } + } + if len(controller.receivers) == 0 { + return nil, fmt.Errorf("None of the provided receivers implement deployment.Receiver. " + + "If the Deployment controller is no longer needed it should be removed.") + } + return controller, nil +} + +// Run will set up the event handlers for types we are interested in, as well +// as syncing informer caches and starting workers. It will block until stopCh +// is closed, at which point it will shutdown the workqueue and wait for +// workers to finish processing their current work items. +func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error { + return c.RunController(threadiness, stopCh, + []cache.InformerSynced{c.synced}, c.syncHandler, "Deployment") +} + +// syncHandler compares the actual state with the desired, and attempts to +// converge the two. It then updates the Status block of the Deployment +// resource with the current status of the resource. +func (c *Controller) syncHandler(key string) error { + // Convert the namespace/name string into a distinct namespace and name + namespace, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + runtime.HandleError(fmt.Errorf("invalid resource key: %s", key)) + return nil + } + + // Get the Deployment resource with this namespace/name + original, err := c.lister.Deployments(namespace).Get(name) + if err != nil { + // The resource may no longer exist, in which case we stop + // processing. 
+ if errors.IsNotFound(err) { + runtime.HandleError(fmt.Errorf("deployment %q in work queue no longer exists", key)) + return nil + } + + return err + } + + for _, dr := range c.receivers { + // Don't modify the informer's copy, and give each receiver a fresh copy. + cp := original.DeepCopy() + if err := dr.SyncDeployment(cp); err != nil { + return err + } + } + return nil +} diff --git a/pkg/controller/endpoint/endpoint.go b/pkg/controller/endpoint/endpoint.go new file mode 100644 index 000000000000..b8da4e88c377 --- /dev/null +++ b/pkg/controller/endpoint/endpoint.go @@ -0,0 +1,129 @@ +/* +Copyright 2018 Google LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package endpoint + +import ( + "fmt" + + clientset "github.com/knative/serving/pkg/client/clientset/versioned" + informers "github.com/knative/serving/pkg/client/informers/externalversions" + "github.com/knative/serving/pkg/controller" + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/util/runtime" + kubeinformers "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + listers "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/cache" +) + +const controllerAgentName = "endpoint-controller" + +// Receiver defines the interface that a receiver must implement to receive events +// from this Controller. 
+type Receiver interface { + SyncEndpoint(*corev1.Endpoints) error +} + +// Controller implements the controller for Endpoint resources +type Controller struct { + *controller.Base + + // lister indexes properties about Endpoint + lister listers.EndpointsLister + synced cache.InformerSynced + + receivers []Receiver +} + +// NewController creates a new Endpoint controller +func NewController( + kubeClientSet kubernetes.Interface, + elaClientSet clientset.Interface, + kubeInformerFactory kubeinformers.SharedInformerFactory, + elaInformerFactory informers.SharedInformerFactory, + config *rest.Config, + controllerConfig controller.Config, + logger *zap.SugaredLogger, + receivers ...interface{}) (controller.Interface, error) { + + // obtain references to a shared index informer for the Endpoint type. + informer := kubeInformerFactory.Core().V1().Endpoints() + + controller := &Controller{ + Base: controller.NewBase(kubeClientSet, elaClientSet, kubeInformerFactory, + elaInformerFactory, informer.Informer(), controllerAgentName, "Endpoints", logger), + lister: informer.Lister(), + synced: informer.Informer().HasSynced, + } + + for _, rcv := range receivers { + if dr, ok := rcv.(Receiver); ok { + controller.receivers = append(controller.receivers, dr) + } + } + if len(controller.receivers) == 0 { + return nil, fmt.Errorf("None of the provided receivers implement endpoint.Receiver. " + + "If the Endpoint controller is no longer needed it should be removed.") + } + return controller, nil +} + +// Run will set up the event handlers for types we are interested in, as well +// as syncing informer caches and starting workers. It will block until stopCh +// is closed, at which point it will shutdown the workqueue and wait for +// workers to finish processing their current work items. 
+func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error { + return c.RunController(threadiness, stopCh, + []cache.InformerSynced{c.synced}, c.syncHandler, "Endpoint") +} + +// syncHandler compares the actual state with the desired, and attempts to +// converge the two. It then updates the Status block of the Endpoint +// resource with the current status of the resource. +func (c *Controller) syncHandler(key string) error { + // Convert the namespace/name string into a distinct namespace and name + namespace, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + runtime.HandleError(fmt.Errorf("invalid resource key: %s", key)) + return nil + } + + // Get the Endpoint resource with this namespace/name + original, err := c.lister.Endpoints(namespace).Get(name) + if err != nil { + // The resource may no longer exist, in which case we stop + // processing. + if errors.IsNotFound(err) { + runtime.HandleError(fmt.Errorf("endpoint %q in work queue no longer exists", key)) + return nil + } + + return err + } + + for _, dr := range c.receivers { + // Don't modify the informer's copy, and give each receiver a fresh copy. + cp := original.DeepCopy() + if err := dr.SyncEndpoint(cp); err != nil { + return err + } + } + return nil +} diff --git a/pkg/controller/ingress/ingress.go b/pkg/controller/ingress/ingress.go new file mode 100644 index 000000000000..9bddf8852ae3 --- /dev/null +++ b/pkg/controller/ingress/ingress.go @@ -0,0 +1,129 @@ +/* +Copyright 2018 Google LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ingress + +import ( + "fmt" + + clientset "github.com/knative/serving/pkg/client/clientset/versioned" + informers "github.com/knative/serving/pkg/client/informers/externalversions" + "github.com/knative/serving/pkg/controller" + "go.uber.org/zap" + extv1beta1 "k8s.io/api/extensions/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/util/runtime" + kubeinformers "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + listers "k8s.io/client-go/listers/extensions/v1beta1" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/cache" +) + +const controllerAgentName = "ingress-controller" + +// Receiver defines the interface that a receiver must implement to receive events +// from this Controller. +type Receiver interface { + SyncIngress(*extv1beta1.Ingress) error +} + +// Controller implements the controller for Ingress resources +type Controller struct { + *controller.Base + + // lister indexes properties about Ingress + lister listers.IngressLister + synced cache.InformerSynced + + receivers []Receiver +} + +// NewController creates a new Ingress controller +func NewController( + kubeClientSet kubernetes.Interface, + elaClientSet clientset.Interface, + kubeInformerFactory kubeinformers.SharedInformerFactory, + elaInformerFactory informers.SharedInformerFactory, + config *rest.Config, + controllerConfig controller.Config, + logger *zap.SugaredLogger, + receivers ...interface{}) (controller.Interface, error) { + + // obtain references to a shared index informer for the Ingress type. 
+ informer := kubeInformerFactory.Extensions().V1beta1().Ingresses() + + controller := &Controller{ + Base: controller.NewBase(kubeClientSet, elaClientSet, kubeInformerFactory, + elaInformerFactory, informer.Informer(), controllerAgentName, "Ingresses", logger), + lister: informer.Lister(), + synced: informer.Informer().HasSynced, + } + + for _, rcv := range receivers { + if dr, ok := rcv.(Receiver); ok { + controller.receivers = append(controller.receivers, dr) + } + } + if len(controller.receivers) == 0 { + return nil, fmt.Errorf("None of the provided receivers implement ingress.Receiver. " + + "If the Ingress controller is no longer needed it should be removed.") + } + return controller, nil +} + +// Run will set up the event handlers for types we are interested in, as well +// as syncing informer caches and starting workers. It will block until stopCh +// is closed, at which point it will shutdown the workqueue and wait for +// workers to finish processing their current work items. +func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error { + return c.RunController(threadiness, stopCh, + []cache.InformerSynced{c.synced}, c.syncHandler, "Ingress") +} + +// syncHandler compares the actual state with the desired, and attempts to +// converge the two. It then updates the Status block of the Ingress +// resource with the current status of the resource. +func (c *Controller) syncHandler(key string) error { + // Convert the namespace/name string into a distinct namespace and name + namespace, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + runtime.HandleError(fmt.Errorf("invalid resource key: %s", key)) + return nil + } + + // Get the Ingress resource with this namespace/name + original, err := c.lister.Ingresses(namespace).Get(name) + if err != nil { + // The resource may no longer exist, in which case we stop + // processing. 
+ if errors.IsNotFound(err) { + runtime.HandleError(fmt.Errorf("ingress %q in work queue no longer exists", key)) + return nil + } + + return err + } + + for _, dr := range c.receivers { + // Don't modify the informer's copy, and give each receiver a fresh copy. + cp := original.DeepCopy() + if err := dr.SyncIngress(cp); err != nil { + return err + } + } + return nil +} diff --git a/pkg/controller/revision/queueing_test.go b/pkg/controller/revision/queueing_test.go index 738f4012e07c..0fb02457cf80 100644 --- a/pkg/controller/revision/queueing_test.go +++ b/pkg/controller/revision/queueing_test.go @@ -20,9 +20,20 @@ import ( "testing" "time" + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + kubeinformers "k8s.io/client-go/informers" + fakekubeclientset "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/rest" + "github.com/knative/serving/pkg/apis/serving" + "github.com/knative/serving/pkg/apis/serving/v1alpha1" + fakeclientset "github.com/knative/serving/pkg/client/clientset/versioned/fake" + informers "github.com/knative/serving/pkg/client/informers/externalversions" + "github.com/knative/serving/pkg/controller" . "github.com/knative/serving/pkg/controller/testing" ) @@ -34,6 +45,121 @@ import ( - resource doesn't exist in lister (from syncHandler) */ +const testNamespace string = "test" + +func getTestRevision() *v1alpha1.Revision { + return &v1alpha1.Revision{ + ObjectMeta: metav1.ObjectMeta{ + SelfLink: "/apis/serving/v1alpha1/namespaces/test/revisions/test-rev", + Name: "test-rev", + Namespace: testNamespace, + Labels: map[string]string{ + "testLabel1": "foo", + "testLabel2": "bar", + serving.RouteLabelKey: "test-route", + }, + Annotations: map[string]string{ + "testAnnotation": "test", + }, + UID: "test-rev-uid", + }, + Spec: v1alpha1.RevisionSpec{ + // corev1.Container has a lot of settings.
We try to pass many + // of them here to verify that we pass through the settings to + // derived objects. + Container: corev1.Container{ + Image: "gcr.io/repo/image", + Command: []string{"echo"}, + Args: []string{"hello", "world"}, + WorkingDir: "/tmp", + Env: []corev1.EnvVar{{ + Name: "EDITOR", + Value: "emacs", + }}, + LivenessProbe: &corev1.Probe{ + TimeoutSeconds: 42, + }, + ReadinessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "health", + }, + }, + TimeoutSeconds: 43, + }, + TerminationMessagePath: "/dev/null", + }, + ServingState: v1alpha1.RevisionServingStateActive, + }, + } +} + +type receiver struct { + kubeClient *fakekubeclientset.Clientset +} + +func (r *receiver) SyncRevision(rev *v1alpha1.Revision) error { + _, err := r.kubeClient.Core().Services(rev.Namespace).Create(&corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: rev.Namespace, + Name: rev.Name, + }, + }) + return err +} + +var _ Receiver = (*receiver)(nil) + +func newRunningTestController(t *testing.T, elaObjects ...runtime.Object) ( + kubeClient *fakekubeclientset.Clientset, + elaClient *fakeclientset.Clientset, + kontroller *Controller, + kubeInformer kubeinformers.SharedInformerFactory, + elaInformer informers.SharedInformerFactory, + stopCh chan struct{}) { + + // Create fake clients + kubeClient = fakekubeclientset.NewSimpleClientset() + elaClient = fakeclientset.NewSimpleClientset(elaObjects...) + + // Create informer factories with fake clients. The second parameter sets the + // resync period to zero, disabling it. 
+ kubeInformer = kubeinformers.NewSharedInformerFactory(kubeClient, 0) + elaInformer = informers.NewSharedInformerFactory(elaClient, 0) + + var err error + ctrl, err := NewController( + kubeClient, + elaClient, + kubeInformer, + elaInformer, + // buildInformer, + &rest.Config{}, + controller.Config{}, + zap.NewNop().Sugar(), + &receiver{kubeClient}, + ) + if err != nil { + t.Fatalf("Error creating controller: %v", err) + } + kontroller = ctrl.(*Controller) + + // Start the informers. This must happen after the call to NewController, + // otherwise there are no informers to be started. + stopCh = make(chan struct{}) + kubeInformer.Start(stopCh) + elaInformer.Start(stopCh) + + // Run the controller. + go func() { + if err := kontroller.Run(2, stopCh); err != nil { + t.Fatalf("Error running controller: %v", err) + } + }() + + return +} + func TestNewRevisionCallsSyncHandler(t *testing.T) { rev := getTestRevision() // TODO(grantr): inserting the route at client creation is necessary diff --git a/pkg/controller/revision/revision.go b/pkg/controller/revision/revision.go index ed46bddf31db..3fc5abb0798f 100644 --- a/pkg/controller/revision/revision.go +++ b/pkg/controller/revision/revision.go @@ -17,75 +17,31 @@ limitations under the License. 
package revision import ( - "context" "fmt" - "net/http" - "reflect" - "strings" - "time" - - "github.com/josephburnett/k8sflag/pkg/k8sflag" - "github.com/knative/serving/pkg/apis/serving" - "github.com/knative/serving/pkg/logging" - "github.com/knative/serving/pkg/logging/logkey" - "go.uber.org/zap" + "github.com/knative/serving/pkg/apis/serving/v1alpha1" clientset "github.com/knative/serving/pkg/client/clientset/versioned" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - kubeinformers "k8s.io/client-go/informers" - informers "github.com/knative/serving/pkg/client/informers/externalversions" + listers "github.com/knative/serving/pkg/client/listers/serving/v1alpha1" + "github.com/knative/serving/pkg/controller" + "go.uber.org/zap" "k8s.io/apimachinery/pkg/api/errors" - apierrs "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/runtime" + kubeinformers "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "k8s.io/client-go/tools/cache" - - buildv1alpha1 "github.com/knative/build/pkg/apis/build/v1alpha1" - buildinformers "github.com/knative/build/pkg/client/informers/externalversions" - "github.com/knative/serving/pkg/apis/serving/v1alpha1" - listers "github.com/knative/serving/pkg/client/listers/serving/v1alpha1" - "github.com/knative/serving/pkg/controller" ) -const ( - userContainerName string = "user-container" - userPortName string = "user-port" - userPort = 8080 - - fluentdContainerName string = "fluentd-proxy" - queueContainerName string = "queue-proxy" - // queueSidecarName set by -queueSidecarName flag - queueHTTPPortName string = "queue-http-port" - - requestQueueContainerName string = "request-queue" +const controllerAgentName = "revision-controller" - controllerAgentName = "revision-controller" - autoscalerPort = 8080 - - serviceTimeoutDuration = 5 * time.Minute - 
sidecarIstioInjectAnnotation = "sidecar.istio.io/inject" - // TODO (arvtiwar): this should be a config option. - progressDeadlineSeconds int32 = 120 -) - -var ( - elaPodReplicaCount = int32(1) - elaPodMaxUnavailable = intstr.IntOrString{Type: intstr.Int, IntVal: 1} - elaPodMaxSurge = intstr.IntOrString{Type: intstr.Int, IntVal: 1} -) - -type resolver interface { - Resolve(*appsv1.Deployment) error +// Receiver defines the interface that a receiver must implement to receive events +// from this Controller. +type Receiver interface { + SyncRevision(*v1alpha1.Revision) error } -// Controller implements the controller for Revision resources. -// +controller:group=ela,version=v1alpha1,kind=Revision,resource=revisions +// Controller implements the controller for Revision resources type Controller struct { *controller.Base @@ -93,112 +49,40 @@ type Controller struct { lister listers.RevisionLister synced cache.InformerSynced - buildtracker *buildTracker - - resolver resolver - - // don't start the workers until endpoints cache have been synced - endpointsSynced cache.InformerSynced - - // enableVarLogCollection dedicates whether to set up a fluentd sidecar to - // collect logs under /var/log/. - enableVarLogCollection bool - - // controllerConfig includes the configurations for the controller - controllerConfig *ControllerConfig -} - -// ControllerConfig includes the configurations for the controller. -type ControllerConfig struct { - // Autoscale part - - // see (config-autoscaler.yaml) - AutoscaleConcurrencyQuantumOfTime *k8sflag.DurationFlag - AutoscaleEnableSingleConcurrency *k8sflag.BoolFlag - - // AutoscalerImage is the name of the image used for the autoscaler pod. - AutoscalerImage string - - // QueueSidecarImage is the name of the image used for the queue sidecar - // injected into the revision pod - QueueSidecarImage string - - // logging part - - // EnableVarLogCollection dedicates whether to set up a fluentd sidecar to - // collect logs under /var/log/. 
- EnableVarLogCollection bool - - // TODO(#818): Use the fluentd deamon set to collect /var/log. - // FluentdSidecarImage is the name of the image used for the fluentd sidecar - // injected into the revision pod. It is used only when enableVarLogCollection - // is true. - FluentdSidecarImage string - // FluentdSidecarOutputConfig is the config for fluentd sidecar to specify - // logging output destination. - FluentdSidecarOutputConfig string - - // LoggingURLTemplate is a string containing the logging url template where - // the variable REVISION_UID will be replaced with the created revision's UID. - LoggingURLTemplate string - - // QueueProxyLoggingConfig is a string containing the logger configuration for queue proxy. - QueueProxyLoggingConfig string - - // QueueProxyLoggingLevel is a string containing the logger level for queue proxy. - QueueProxyLoggingLevel string + receivers []Receiver } -// NewController initializes the controller and is called by the generated code -// Registers eventhandlers to enqueue events -// config - client configuration for talking to the apiserver -// si - informer factory shared across all controllers for listening to events and indexing resource properties -// queue - message queue for handling new events. unique to this controller. +// NewController creates a new Revision controller func NewController( kubeClientSet kubernetes.Interface, elaClientSet clientset.Interface, kubeInformerFactory kubeinformers.SharedInformerFactory, elaInformerFactory informers.SharedInformerFactory, - buildInformerFactory buildinformers.SharedInformerFactory, config *rest.Config, - controllerConfig *ControllerConfig, - logger *zap.SugaredLogger) controller.Interface { + controllerConfig controller.Config, + logger *zap.SugaredLogger, + receivers ...interface{}) (controller.Interface, error) { - // obtain references to a shared index informer for the Revision and Endpoint type. + // obtain references to a shared index informer for the Revision type. 
informer := elaInformerFactory.Serving().V1alpha1().Revisions() - endpointsInformer := kubeInformerFactory.Core().V1().Endpoints() - deploymentInformer := kubeInformerFactory.Apps().V1().Deployments() controller := &Controller{ Base: controller.NewBase(kubeClientSet, elaClientSet, kubeInformerFactory, elaInformerFactory, informer.Informer(), controllerAgentName, "Revisions", logger), - lister: informer.Lister(), - synced: informer.Informer().HasSynced, - endpointsSynced: endpointsInformer.Informer().HasSynced, - buildtracker: &buildTracker{builds: map[key]set{}}, - resolver: &digestResolver{client: kubeClientSet, transport: http.DefaultTransport}, - controllerConfig: controllerConfig, + lister: informer.Lister(), + synced: informer.Informer().HasSynced, } - controller.Logger.Info("Setting up event handlers") - // Obtain a reference to a shared index informer for the Build type. - buildInformer := buildInformerFactory.Build().V1alpha1().Builds() - buildInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: controller.addBuildEvent, - UpdateFunc: controller.updateBuildEvent, - }) - - endpointsInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: controller.addEndpointsEvent, - UpdateFunc: controller.updateEndpointsEvent, - }) - - deploymentInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: controller.addDeploymentProgressEvent, - UpdateFunc: controller.updateDeploymentProgressEvent, - }) - - return controller + for _, rcv := range receivers { + if dr, ok := rcv.(Receiver); ok { + controller.receivers = append(controller.receivers, dr) + } + } + if len(controller.receivers) == 0 { + return nil, fmt.Errorf("None of the provided receivers implement revision.Receiver. 
" + + "If the Revision controller is no longer needed it should be removed.") + } + return controller, nil } // Run will set up the event handlers for types we are interested in, as well @@ -206,18 +90,13 @@ func NewController( // is closed, at which point it will shutdown the workqueue and wait for // workers to finish processing their current work items. func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error { - return c.RunController(threadiness, stopCh, []cache.InformerSynced{c.synced, c.endpointsSynced}, - c.syncHandler, "Revision") -} - -// loggerWithRevisionInfo enriches the logs with revision name and namespace. -func loggerWithRevisionInfo(logger *zap.SugaredLogger, ns string, name string) *zap.SugaredLogger { - return logger.With(zap.String(logkey.Namespace, ns), zap.String(logkey.Revision, name)) + return c.RunController(threadiness, stopCh, + []cache.InformerSynced{c.synced}, c.syncHandler, "Revision") } // syncHandler compares the actual state with the desired, and attempts to -// converge the two. It then updates the Status block of the Revision resource -// with the current status of the resource. +// converge the two. It then updates the Status block of the Revision +// resource with the current status of the resource. func (c *Controller) syncHandler(key string) error { // Convert the namespace/name string into a distinct namespace and name namespace, name, err := cache.SplitMetaNamespaceKey(key) @@ -226,12 +105,8 @@ func (c *Controller) syncHandler(key string) error { return nil } - logger := loggerWithRevisionInfo(c.Logger, namespace, name) - ctx := logging.WithLogger(context.TODO(), logger) - logger.Info("Running reconcile Revision") - // Get the Revision resource with this namespace/name - rev, err := c.lister.Revisions(namespace).Get(name) + original, err := c.lister.Revisions(namespace).Get(name) if err != nil { // The resource may no longer exist, in which case we stop // processing. 
@@ -239,800 +114,16 @@ func (c *Controller) syncHandler(key string) error { runtime.HandleError(fmt.Errorf("revision %q in work queue no longer exists", key)) return nil } - return err - } - // Don't modify the informer's copy. - rev = rev.DeepCopy() - - if err := c.updateRevisionLoggingURL(rev); err != nil { - logger.Error("Error updating the revisions logging url", zap.Error(err)) - return err - } - - if rev.Spec.BuildName != "" { - if done, failed := isBuildDone(rev); !done { - if alreadyTracked := c.buildtracker.Track(rev); !alreadyTracked { - if err := c.markRevisionBuilding(ctx, rev); err != nil { - logger.Error("Error recording the BuildSucceeded=Unknown condition", zap.Error(err)) - return err - } - } - return nil - } else { - // The Build's complete, so stop tracking it. - c.buildtracker.Untrack(rev) - if failed { - return nil - } - // If the build didn't fail, proceed to creating K8s resources. - } - } - - _, err = controller.GetOrCreateRevisionNamespace(ctx, namespace, c.KubeClientSet) - if err != nil { - logger.Panic("Failed to create namespace", zap.Error(err)) - } - logger.Info("Namespace validated to exist, moving on") - - return c.reconcileWithImage(ctx, rev, namespace) -} - -// reconcileWithImage handles enqueued messages that have an image. -func (c *Controller) reconcileWithImage(ctx context.Context, rev *v1alpha1.Revision, ns string) error { - err := c.reconcileOnceBuilt(ctx, rev, ns) - if err != nil { - logging.FromContext(ctx).Error("Reconcile once build failed", zap.Error(err)) - } - return err -} - -func (c *Controller) updateRevisionLoggingURL(rev *v1alpha1.Revision) error { - logURLTmpl := c.controllerConfig.LoggingURLTemplate - if logURLTmpl == "" { - return nil - } - - url := strings.Replace(logURLTmpl, "${REVISION_UID}", string(rev.UID), -1) - - if rev.Status.LogURL == url { - return nil - } - rev.Status.LogURL = url - _, err := c.updateStatus(rev) - return err -} - -// Checks whether the Revision knows whether the build is done. 
-// TODO(mattmoor): Use a method on the Build type. -func isBuildDone(rev *v1alpha1.Revision) (done, failed bool) { - if rev.Spec.BuildName == "" { - return true, false - } - for _, cond := range rev.Status.Conditions { - if cond.Type != v1alpha1.RevisionConditionBuildSucceeded { - continue - } - switch cond.Status { - case corev1.ConditionTrue: - return true, false - case corev1.ConditionFalse: - return true, true - case corev1.ConditionUnknown: - return false, false - } - } - return false, false -} - -func (c *Controller) markRevisionReady(ctx context.Context, rev *v1alpha1.Revision) error { - logger := logging.FromContext(ctx) - logger.Info("Marking Revision ready") - rev.Status.SetCondition( - &v1alpha1.RevisionCondition{ - Type: v1alpha1.RevisionConditionReady, - Status: corev1.ConditionTrue, - Reason: "ServiceReady", - }) - _, err := c.updateStatus(rev) - return err -} - -func (c *Controller) markRevisionFailed(ctx context.Context, rev *v1alpha1.Revision) error { - logger := logging.FromContext(ctx) - logger.Info("Marking Revision failed") - reason, message := "ServiceTimeout", "Timed out waiting for a service endpoint to become ready" - rev.Status.SetCondition( - &v1alpha1.RevisionCondition{ - Type: v1alpha1.RevisionConditionResourcesAvailable, - Status: corev1.ConditionFalse, - Reason: reason, - Message: message, - }) - rev.Status.SetCondition( - &v1alpha1.RevisionCondition{ - Type: v1alpha1.RevisionConditionReady, - Status: corev1.ConditionFalse, - Reason: reason, - Message: message, - }) - _, err := c.updateStatus(rev) - return err -} - -func (c *Controller) markRevisionBuilding(ctx context.Context, rev *v1alpha1.Revision) error { - logger := logging.FromContext(ctx) - reason := "Building" - logger.Infof("Marking Revision %s", reason) - rev.Status.SetCondition( - &v1alpha1.RevisionCondition{ - Type: v1alpha1.RevisionConditionBuildSucceeded, - Status: corev1.ConditionUnknown, - Reason: reason, - }) - rev.Status.SetCondition( - &v1alpha1.RevisionCondition{ 
- Type: v1alpha1.RevisionConditionReady, - Status: corev1.ConditionUnknown, - Reason: reason, - }) - // Let this trigger a reconciliation loop. - _, err := c.updateStatus(rev) - return err -} - -func (c *Controller) markBuildComplete(rev *v1alpha1.Revision, bc *buildv1alpha1.BuildCondition) error { - switch bc.Type { - case buildv1alpha1.BuildComplete: - rev.Status.SetCondition( - &v1alpha1.RevisionCondition{ - Type: v1alpha1.RevisionConditionBuildSucceeded, - Status: corev1.ConditionTrue, - }) - c.Recorder.Event(rev, corev1.EventTypeNormal, "BuildComplete", bc.Message) - case buildv1alpha1.BuildFailed, buildv1alpha1.BuildInvalid: - rev.Status.SetCondition( - &v1alpha1.RevisionCondition{ - Type: v1alpha1.RevisionConditionBuildSucceeded, - Status: corev1.ConditionFalse, - Reason: bc.Reason, - Message: bc.Message, - }) - rev.Status.SetCondition( - &v1alpha1.RevisionCondition{ - Type: v1alpha1.RevisionConditionReady, - Status: corev1.ConditionFalse, - Reason: bc.Reason, - Message: bc.Message, - }) - c.Recorder.Event(rev, corev1.EventTypeWarning, "BuildFailed", bc.Message) - } - // This will trigger a reconciliation that will cause us to stop tracking the build. 
- _, err := c.updateStatus(rev) - return err -} - -func getBuildDoneCondition(build *buildv1alpha1.Build) *buildv1alpha1.BuildCondition { - for _, cond := range build.Status.Conditions { - if cond.Status != corev1.ConditionTrue { - continue - } - switch cond.Type { - case buildv1alpha1.BuildComplete, buildv1alpha1.BuildFailed, buildv1alpha1.BuildInvalid: - return &cond - } - } - return nil -} - -func getIsServiceReady(e *corev1.Endpoints) bool { - for _, es := range e.Subsets { - if len(es.Addresses) > 0 { - return true - } - } - return false -} - -func getRevisionLastTransitionTime(r *v1alpha1.Revision) time.Time { - condCount := len(r.Status.Conditions) - if condCount == 0 { - return r.CreationTimestamp.Time - } - return r.Status.Conditions[condCount-1].LastTransitionTime.Time -} - -func getDeploymentProgressCondition(deployment *appsv1.Deployment) *appsv1.DeploymentCondition { - - //as per https://kubernetes.io/docs/concepts/workloads/controllers/deployment - for _, cond := range deployment.Status.Conditions { - // Look for Deployment with status False - if cond.Status != corev1.ConditionFalse { - continue - } - // with Type Progressing and Reason Timeout - // TODO (arvtiwar): hard coding "ProgressDeadlineExceeded" to avoid import kubernetes/kubernetes - if cond.Type == appsv1.DeploymentProgressing && cond.Reason == "ProgressDeadlineExceeded" { - return &cond - } - } - return nil -} - -func (c *Controller) addBuildEvent(obj interface{}) { - build := obj.(*buildv1alpha1.Build) - - cond := getBuildDoneCondition(build) - if cond == nil { - // The build isn't done, so ignore this event. - return - } - - // For each of the revisions watching this build, mark their build phase as complete. - for k := range c.buildtracker.GetTrackers(build) { - // Look up the revision to mark complete. 
- namespace, name := splitKey(k) - rev, err := c.lister.Revisions(namespace).Get(name) - if err != nil { - c.Logger.Error("Error fetching revision upon build completion", - zap.String(logkey.Namespace, namespace), zap.String(logkey.Revision, name), zap.Error(err)) - } - if err := c.markBuildComplete(rev, cond); err != nil { - c.Logger.Error("Error marking build completion", - zap.String(logkey.Namespace, namespace), zap.String(logkey.Revision, name), zap.Error(err)) - } - } - - return -} - -func (c *Controller) updateBuildEvent(old, new interface{}) { - c.addBuildEvent(new) -} - -func (c *Controller) addDeploymentProgressEvent(obj interface{}) { - deployment := obj.(*appsv1.Deployment) - cond := getDeploymentProgressCondition(deployment) - - if cond == nil { - return - } - - //Get the handle of Revision in context - revName := deployment.Name - namespace := deployment.Namespace - logger := loggerWithRevisionInfo(c.Logger, namespace, revName) - - rev, err := c.lister.Revisions(namespace).Get(revName) - if err != nil { - logger.Error("Error fetching revision", zap.Error(err)) - return - } - //Set the revision condition reason to ProgressDeadlineExceeded - rev.Status.SetCondition( - &v1alpha1.RevisionCondition{ - Type: v1alpha1.RevisionConditionReady, - Status: corev1.ConditionFalse, - Reason: "ProgressDeadlineExceeded", - Message: fmt.Sprintf("Unable to create pods for more than %d seconds.", progressDeadlineSeconds), - }) - - logger.Infof("Updating status with the following conditions %+v", rev.Status.Conditions) - if _, err := c.updateStatus(rev); err != nil { - logger.Error("Error recording revision completion", zap.Error(err)) - return - } - c.Recorder.Eventf(rev, corev1.EventTypeNormal, "ProgressDeadlineExceeded", "Revision %s not ready due to Deployment timeout", revName) - return -} - -func (c *Controller) updateDeploymentProgressEvent(old, new interface{}) { - c.addDeploymentProgressEvent(new) -} - -func (c *Controller) addEndpointsEvent(obj interface{}) { - 
endpoint := obj.(*corev1.Endpoints) - eName := endpoint.Name - namespace := endpoint.Namespace - // Lookup and see if this endpoints corresponds to a service that - // we own and hence the Revision that created this service. - revName := lookupServiceOwner(endpoint) - if revName == "" { - return - } - logger := loggerWithRevisionInfo(c.Logger, namespace, revName) - ctx := logging.WithLogger(context.TODO(), logger) - - rev, err := c.lister.Revisions(namespace).Get(revName) - if err != nil { - logger.Error("Error fetching revision", zap.Error(err)) - return - } - - // Check to see if endpoint is the service endpoint - if eName != controller.GetElaK8SServiceNameForRevision(rev) { - return - } - - // Check to see if the revision has already been marked as ready or failed - // and if it is, then there's no need to do anything to it. - if rev.Status.IsReady() || rev.Status.IsFailed() { - return - } - - // Don't modify the informer's copy. - rev = rev.DeepCopy() - - if getIsServiceReady(endpoint) { - logger.Infof("Endpoint %q is ready", eName) - if err := c.markRevisionReady(ctx, rev); err != nil { - logger.Error("Error marking revision ready", zap.Error(err)) - return - } - c.Recorder.Eventf(rev, corev1.EventTypeNormal, "RevisionReady", "Revision becomes ready upon endpoint %q becoming ready", endpoint.Name) - return - } - - revisionAge := time.Now().Sub(getRevisionLastTransitionTime(rev)) - if revisionAge < serviceTimeoutDuration { - return - } - - if err := c.markRevisionFailed(ctx, rev); err != nil { - logger.Error("Error marking revision failed", zap.Error(err)) - return - } - c.Recorder.Eventf(rev, corev1.EventTypeWarning, "RevisionFailed", "Revision did not become ready due to endpoint %q", endpoint.Name) - return -} - -func (c *Controller) updateEndpointsEvent(old, new interface{}) { - c.addEndpointsEvent(new) -} - -// reconcileOnceBuilt handles enqueued messages that have an image. 
-func (c *Controller) reconcileOnceBuilt(ctx context.Context, rev *v1alpha1.Revision, ns string) error { - logger := logging.FromContext(ctx) - accessor, err := meta.Accessor(rev) - if err != nil { - logger.Panic("Failed to get metadata", zap.Error(err)) - } - - deletionTimestamp := accessor.GetDeletionTimestamp() - logger.Infof("Check the deletionTimestamp: %s\n", deletionTimestamp) - - elaNS := controller.GetElaNamespaceName(rev.Namespace) - - if deletionTimestamp == nil && rev.Spec.ServingState == v1alpha1.RevisionServingStateActive { - logger.Info("Creating or reconciling resources for revision") - return c.createK8SResources(ctx, rev, elaNS) - } - return c.deleteK8SResources(ctx, rev, elaNS) -} - -func (c *Controller) deleteK8SResources(ctx context.Context, rev *v1alpha1.Revision, ns string) error { - logger := logging.FromContext(ctx) - logger.Info("Deleting the resources for revision") - err := c.deleteDeployment(ctx, rev, ns) - if err != nil { - logger.Error("Failed to delete a deployment", zap.Error(err)) - } - logger.Info("Deleted deployment") - - err = c.deleteAutoscalerDeployment(ctx, rev) - if err != nil { - logger.Error("Failed to delete autoscaler Deployment", zap.Error(err)) - } - logger.Info("Deleted autoscaler Deployment") - - err = c.deleteAutoscalerService(ctx, rev) - if err != nil { - logger.Error("Failed to delete autoscaler Service", zap.Error(err)) - } - logger.Info("Deleted autoscaler Service") - - err = c.deleteService(ctx, rev, ns) - if err != nil { - logger.Error("Failed to delete k8s service", zap.Error(err)) - } - logger.Info("Deleted service") - - // And the deployment is no longer ready, so update that - rev.Status.SetCondition( - &v1alpha1.RevisionCondition{ - Type: v1alpha1.RevisionConditionReady, - Status: corev1.ConditionFalse, - Reason: "Inactive", - }) - logger.Infof("Updating status with the following conditions %+v", rev.Status.Conditions) - if _, err := c.updateStatus(rev); err != nil { - logger.Error("Error recording 
inactivation of revision", zap.Error(err)) - return err - } - - return nil -} - -func (c *Controller) createK8SResources(ctx context.Context, rev *v1alpha1.Revision, ns string) error { - logger := logging.FromContext(ctx) - // Fire off a Deployment.. - if err := c.reconcileDeployment(ctx, rev, ns); err != nil { - logger.Error("Failed to create a deployment", zap.Error(err)) - return err - } - - // Autoscale the service - if err := c.reconcileAutoscalerDeployment(ctx, rev); err != nil { - logger.Error("Failed to create autoscaler Deployment", zap.Error(err)) - } - if err := c.reconcileAutoscalerService(ctx, rev); err != nil { - logger.Error("Failed to create autoscaler Service", zap.Error(err)) - } - if c.controllerConfig.EnableVarLogCollection { - if err := c.reconcileFluentdConfigMap(ctx, rev); err != nil { - logger.Error("Failed to create fluent config map", zap.Error(err)) - } - } - - // Create k8s service - serviceName, err := c.reconcileService(ctx, rev, ns) - if err != nil { - logger.Error("Failed to create k8s service", zap.Error(err)) - } else { - rev.Status.ServiceName = serviceName - } - - // Check to see if the revision has already been marked as ready and - // don't mark it if it's already ready. - // TODO: could always fetch the endpoint again and double-check it is still - // ready. - if rev.Status.IsReady() { - return nil - } - - // Checking existing revision condition to see if it is the initial deployment or - // during the reactivating process. If a revision is in condition "Inactive" or "Activating", - // we need to route traffic to the activator; if a revision is in condition "Deploying", - // we need to route traffic to the revision directly. 
- reason := "Deploying" - cond := rev.Status.GetCondition(v1alpha1.RevisionConditionReady) - if cond != nil { - if (cond.Reason == "Inactive" && cond.Status == corev1.ConditionFalse) || - (cond.Reason == "Activating" && cond.Status == corev1.ConditionUnknown) { - reason = "Activating" - } - } - - // By updating our deployment status we will trigger a Reconcile() - // that will watch for service to become ready for serving traffic. - rev.Status.SetCondition( - &v1alpha1.RevisionCondition{ - Type: v1alpha1.RevisionConditionReady, - Status: corev1.ConditionUnknown, - Reason: reason, - }) - logger.Infof("Updating status with the following conditions %+v", rev.Status.Conditions) - if _, err := c.updateStatus(rev); err != nil { - logger.Error("Error recording build completion", zap.Error(err)) - return err - } - - return nil -} - -func (c *Controller) deleteDeployment(ctx context.Context, rev *v1alpha1.Revision, ns string) error { - logger := logging.FromContext(ctx) - deploymentName := controller.GetRevisionDeploymentName(rev) - dc := c.KubeClientSet.AppsV1().Deployments(ns) - if _, err := dc.Get(deploymentName, metav1.GetOptions{}); err != nil && apierrs.IsNotFound(err) { - return nil - } - - logger.Infof("Deleting Deployment %q", deploymentName) - tmp := metav1.DeletePropagationForeground - err := dc.Delete(deploymentName, &metav1.DeleteOptions{ - PropagationPolicy: &tmp, - }) - if err != nil && !apierrs.IsNotFound(err) { - logger.Errorf("deployments.Delete for %q failed: %s", deploymentName, err) - return err - } - return nil -} - -func (c *Controller) reconcileDeployment(ctx context.Context, rev *v1alpha1.Revision, ns string) error { - logger := logging.FromContext(ctx) - dc := c.KubeClientSet.AppsV1().Deployments(ns) - // First, check if deployment exists already. 
- deploymentName := controller.GetRevisionDeploymentName(rev) - - if _, err := dc.Get(deploymentName, metav1.GetOptions{}); err != nil { - if !apierrs.IsNotFound(err) { - logger.Errorf("deployments.Get for %q failed: %s", deploymentName, err) - return err - } - logger.Infof("Deployment %q doesn't exist, creating", deploymentName) - } else { - // TODO(mattmoor): Compare the deployments and update if it has changed - // out from under us. - logger.Infof("Found existing deployment %q", deploymentName) - return nil - } - - // Create the deployment. - controllerRef := controller.NewRevisionControllerRef(rev) - // Create a single pod so that it gets created before deployment->RS to try to speed - // things up - podSpec := MakeElaPodSpec(rev, c.controllerConfig) - deployment := MakeElaDeployment(rev, ns) - deployment.OwnerReferences = append(deployment.OwnerReferences, *controllerRef) - - deployment.Spec.Template.Spec = *podSpec - - // Resolve tag image references to digests. - if err := c.resolver.Resolve(deployment); err != nil { - logger.Error("Error resolving deployment", zap.Error(err)) - rev.Status.SetCondition( - &v1alpha1.RevisionCondition{ - Type: v1alpha1.RevisionConditionContainerHealthy, - Status: corev1.ConditionFalse, - Reason: "ContainerMissing", - Message: err.Error(), - }) - rev.Status.SetCondition( - &v1alpha1.RevisionCondition{ - Type: v1alpha1.RevisionConditionReady, - Status: corev1.ConditionFalse, - Reason: "ContainerMissing", - Message: err.Error(), - }) - if _, err := c.updateStatus(rev); err != nil { - logger.Error("Error recording resolution problem", zap.Error(err)) - return err - } - return err - } - - // Set the ProgressDeadlineSeconds - deployment.Spec.ProgressDeadlineSeconds = new(int32) - *deployment.Spec.ProgressDeadlineSeconds = progressDeadlineSeconds - - logger.Infof("Creating Deployment: %q", deployment.Name) - _, createErr := dc.Create(deployment) - - return createErr -} - -func (c *Controller) deleteService(ctx context.Context, rev 
*v1alpha1.Revision, ns string) error { - logger := logging.FromContext(ctx) - sc := c.KubeClientSet.Core().Services(ns) - serviceName := controller.GetElaK8SServiceNameForRevision(rev) - - logger.Infof("Deleting service %q", serviceName) - tmp := metav1.DeletePropagationForeground - err := sc.Delete(serviceName, &metav1.DeleteOptions{ - PropagationPolicy: &tmp, - }) - if err != nil && !apierrs.IsNotFound(err) { - logger.Errorf("service.Delete for %q failed: %s", serviceName, err) - return err - } - return nil -} - -func (c *Controller) reconcileService(ctx context.Context, rev *v1alpha1.Revision, ns string) (string, error) { - logger := logging.FromContext(ctx) - sc := c.KubeClientSet.Core().Services(ns) - serviceName := controller.GetElaK8SServiceNameForRevision(rev) - - if _, err := sc.Get(serviceName, metav1.GetOptions{}); err != nil { - if !apierrs.IsNotFound(err) { - logger.Errorf("services.Get for %q failed: %s", serviceName, err) - return "", err - } - logger.Infof("serviceName %q doesn't exist, creating", serviceName) - } else { - // TODO(vaikas): Check that the service is legit and matches what we expect - // to have there. - logger.Infof("Found existing service %q", serviceName) - return serviceName, nil - } - - controllerRef := controller.NewRevisionControllerRef(rev) - service := MakeRevisionK8sService(rev, ns) - service.OwnerReferences = append(service.OwnerReferences, *controllerRef) - logger.Infof("Creating service: %q", service.Name) - _, err := sc.Create(service) - return serviceName, err -} -func (c *Controller) reconcileFluentdConfigMap(ctx context.Context, rev *v1alpha1.Revision) error { - logger := logging.FromContext(ctx) - ns := rev.Namespace - - // One ConfigMap for Fluentd sidecar per namespace. It has multiple owner - // references. Can not set blockOwnerDeletion and Controller to true. 
- revRef := newRevisionNonControllerRef(rev) - - cmc := c.KubeClientSet.Core().ConfigMaps(ns) - configMap, err := cmc.Get(fluentdConfigMapName, metav1.GetOptions{}) - if err != nil { - if !apierrs.IsNotFound(err) { - logger.Errorf("configmaps.Get for %q failed: %s", fluentdConfigMapName, err) - return err - } - // ConfigMap doesn't exist, going to create it - configMap = MakeFluentdConfigMap(ns, c.controllerConfig.FluentdSidecarOutputConfig) - configMap.OwnerReferences = append(configMap.OwnerReferences, *revRef) - logger.Infof("Creating configmap: %q", configMap.Name) - _, err = cmc.Create(configMap) - return err - } - - // ConfigMap exists, going to update it - desiredConfigMap := configMap.DeepCopy() - desiredConfigMap.Data = map[string]string{ - "varlog.conf": makeFullFluentdConfig(c.controllerConfig.FluentdSidecarOutputConfig), - } - addOwnerReference(desiredConfigMap, revRef) - if !reflect.DeepEqual(desiredConfigMap, configMap) { - logger.Infof("Updating configmap: %q", desiredConfigMap.Name) - _, err = cmc.Update(desiredConfigMap) return err } - return nil -} - -func newRevisionNonControllerRef(rev *v1alpha1.Revision) *metav1.OwnerReference { - blockOwnerDeletion := false - isController := false - revRef := controller.NewRevisionControllerRef(rev) - revRef.BlockOwnerDeletion = &blockOwnerDeletion - revRef.Controller = &isController - return revRef -} - -func addOwnerReference(configMap *corev1.ConfigMap, ownerReference *metav1.OwnerReference) { - isOwner := false - for _, existingOwner := range configMap.OwnerReferences { - if ownerReference.Name == existingOwner.Name { - isOwner = true - break - } - } - if !isOwner { - configMap.OwnerReferences = append(configMap.OwnerReferences, *ownerReference) - } -} -func (c *Controller) deleteAutoscalerService(ctx context.Context, rev *v1alpha1.Revision) error { - logger := logging.FromContext(ctx) - autoscalerName := controller.GetRevisionAutoscalerName(rev) - sc := c.KubeClientSet.Core().Services(AutoscalerNamespace) 
- if _, err := sc.Get(autoscalerName, metav1.GetOptions{}); err != nil && apierrs.IsNotFound(err) { - return nil - } - logger.Infof("Deleting autoscaler Service %q", autoscalerName) - tmp := metav1.DeletePropagationForeground - err := sc.Delete(autoscalerName, &metav1.DeleteOptions{ - PropagationPolicy: &tmp, - }) - if err != nil && !apierrs.IsNotFound(err) { - logger.Errorf("Autoscaler Service delete for %q failed: %s", autoscalerName, err) - return err - } - return nil -} - -func (c *Controller) reconcileAutoscalerService(ctx context.Context, rev *v1alpha1.Revision) error { - logger := logging.FromContext(ctx) - autoscalerName := controller.GetRevisionAutoscalerName(rev) - sc := c.KubeClientSet.Core().Services(AutoscalerNamespace) - _, err := sc.Get(autoscalerName, metav1.GetOptions{}) - if err != nil { - if !apierrs.IsNotFound(err) { - logger.Errorf("Autoscaler Service get for %q failed: %s", autoscalerName, err) - return err - } - logger.Infof("Autoscaler Service %q doesn't exist, creating", autoscalerName) - } else { - logger.Infof("Found existing autoscaler Service %q", autoscalerName) - return nil - } - - controllerRef := controller.NewRevisionControllerRef(rev) - service := MakeElaAutoscalerService(rev) - service.OwnerReferences = append(service.OwnerReferences, *controllerRef) - logger.Infof("Creating autoscaler Service: %q", service.Name) - _, err = sc.Create(service) - return err -} - -func (c *Controller) deleteAutoscalerDeployment(ctx context.Context, rev *v1alpha1.Revision) error { - logger := logging.FromContext(ctx) - autoscalerName := controller.GetRevisionAutoscalerName(rev) - dc := c.KubeClientSet.AppsV1().Deployments(AutoscalerNamespace) - _, err := dc.Get(autoscalerName, metav1.GetOptions{}) - if err != nil && apierrs.IsNotFound(err) { - return nil - } - logger.Infof("Deleting autoscaler Deployment %q", autoscalerName) - tmp := metav1.DeletePropagationForeground - err = dc.Delete(autoscalerName, &metav1.DeleteOptions{ - PropagationPolicy: &tmp, 
- }) - if err != nil && !apierrs.IsNotFound(err) { - logger.Errorf("Autoscaler Deployment delete for %q failed: %s", autoscalerName, err) - return err - } - return nil -} - -func (c *Controller) reconcileAutoscalerDeployment(ctx context.Context, rev *v1alpha1.Revision) error { - logger := logging.FromContext(ctx) - autoscalerName := controller.GetRevisionAutoscalerName(rev) - dc := c.KubeClientSet.AppsV1().Deployments(AutoscalerNamespace) - _, err := dc.Get(autoscalerName, metav1.GetOptions{}) - if err != nil { - if !apierrs.IsNotFound(err) { - logger.Errorf("Autoscaler Deployment get for %q failed: %s", autoscalerName, err) + for _, dr := range c.receivers { + // Don't modify the informer's copy, and give each receiver a fresh copy. + cp := original.DeepCopy() + if err := dr.SyncRevision(cp); err != nil { return err } - logger.Infof("Autoscaler Deployment %q doesn't exist, creating", autoscalerName) - } else { - logger.Infof("Found existing autoscaler Deployment %q", autoscalerName) - return nil - } - - controllerRef := controller.NewRevisionControllerRef(rev) - deployment := MakeElaAutoscalerDeployment(rev, c.controllerConfig.AutoscalerImage) - deployment.OwnerReferences = append(deployment.OwnerReferences, *controllerRef) - logger.Infof("Creating autoscaler Deployment: %q", deployment.Name) - _, err = dc.Create(deployment) - return err -} - -func (c *Controller) removeFinalizers(ctx context.Context, rev *v1alpha1.Revision, ns string) error { - logger := logging.FromContext(ctx) - logger.Infof("Removing finalizers for %q\n", rev.Name) - accessor, err := meta.Accessor(rev) - if err != nil { - logger.Panic("Failed to get metadata", zap.Error(err)) - } - finalizers := accessor.GetFinalizers() - for i, v := range finalizers { - if v == "controller" { - finalizers = append(finalizers[:i], finalizers[i+1:]...) 
- } } - accessor.SetFinalizers(finalizers) - prClient := c.ElaClientSet.ServingV1alpha1().Revisions(rev.Namespace) - prClient.Update(rev) - logger.Infof("The finalizer 'controller' is removed.") - return nil } - -func (c *Controller) updateStatus(rev *v1alpha1.Revision) (*v1alpha1.Revision, error) { - prClient := c.ElaClientSet.ServingV1alpha1().Revisions(rev.Namespace) - newRev, err := prClient.Get(rev.Name, metav1.GetOptions{}) - if err != nil { - return nil, err - } - newRev.Status = rev.Status - - // TODO: for CRD there's no updatestatus, so use normal update - return prClient.Update(newRev) - // return prClient.UpdateStatus(newRev) -} - -// Given an endpoint see if it's managed by us and return the -// revision that created it. -// TODO: Consider using OwnerReferences. -// https://github.com/kubernetes/sample-controller/blob/master/controller.go#L373-L384 -func lookupServiceOwner(endpoint *corev1.Endpoints) string { - // see if there's a label on this object marking it as ours. - if revisionName, ok := endpoint.Labels[serving.RevisionLabelKey]; ok { - return revisionName - } - return "" -} diff --git a/pkg/controller/route/queueing_test.go b/pkg/controller/route/queueing_test.go index 98878448c6de..0d8ac84b0fcc 100644 --- a/pkg/controller/route/queueing_test.go +++ b/pkg/controller/route/queueing_test.go @@ -17,15 +17,25 @@ limitations under the License. package route import ( + "context" + "fmt" "testing" "time" + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + kubeinformers "k8s.io/client-go/informers" + fakekubeclientset "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/rest" "github.com/knative/serving/pkg/apis/serving/v1alpha1" - corev1 "k8s.io/api/core/v1" - + fakeclientset "github.com/knative/serving/pkg/client/clientset/versioned/fake" + informers "github.com/knative/serving/pkg/client/informers/externalversions" + ctrl "github.com/knative/serving/pkg/controller" . 
"github.com/knative/serving/pkg/controller/testing" + "github.com/knative/serving/pkg/logging" ) /* TODO tests: @@ -36,6 +46,129 @@ import ( - resource doesn't exist in lister (from syncHandler) */ +const ( + testNamespace string = "test" + defaultDomainSuffix string = "test-domain.dev" + prodDomainSuffix string = "prod-domain.com" +) + +var ( + testLogger = zap.NewNop().Sugar() + testCtx = logging.WithLogger(context.Background(), testLogger) +) + +func getTestRevision(name string) *v1alpha1.Revision { + return &v1alpha1.Revision{ + ObjectMeta: metav1.ObjectMeta{ + SelfLink: fmt.Sprintf("/apis/serving/v1alpha1/namespaces/test/revisions/%s", name), + Name: name, + Namespace: testNamespace, + }, + Spec: v1alpha1.RevisionSpec{ + Container: corev1.Container{ + Image: "test-image", + }, + }, + Status: v1alpha1.RevisionStatus{ + ServiceName: fmt.Sprintf("%s-service", name), + Conditions: []v1alpha1.RevisionCondition{{ + Type: v1alpha1.RevisionConditionReady, + Status: corev1.ConditionTrue, + Reason: "ServiceReady", + }}, + }, + } +} + +func getTestRouteWithTrafficTargets(traffic []v1alpha1.TrafficTarget) *v1alpha1.Route { + return &v1alpha1.Route{ + ObjectMeta: metav1.ObjectMeta{ + SelfLink: "/apis/serving/v1alpha1/namespaces/test/Routes/test-route", + Name: "test-route", + Namespace: testNamespace, + Labels: map[string]string{ + "route": "test-route", + }, + }, + Spec: v1alpha1.RouteSpec{ + Traffic: traffic, + }, + } +} + +type receiver struct { + kubeClient *fakekubeclientset.Clientset +} + +func (r *receiver) SyncRoute(route *v1alpha1.Route) error { + _, err := r.kubeClient.Core().Services(route.Namespace).Create(&corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: route.Namespace, + Name: route.Name, + }, + }) + return err +} + +var _ Receiver = (*receiver)(nil) + +func newRunningTestController(t *testing.T, elaObjects ...runtime.Object) ( + kubeClient *fakekubeclientset.Clientset, + elaClient *fakeclientset.Clientset, + controller *Controller, + 
kubeInformer kubeinformers.SharedInformerFactory, + elaInformer informers.SharedInformerFactory, + stopCh chan struct{}) { + + // Create fake clients + kubeClient = fakekubeclientset.NewSimpleClientset() + elaClient = fakeclientset.NewSimpleClientset(elaObjects...) + + // Create informer factories with fake clients. The second parameter sets the + // resync period to zero, disabling it. + kubeInformer = kubeinformers.NewSharedInformerFactory(kubeClient, 0) + elaInformer = informers.NewSharedInformerFactory(elaClient, 0) + + c, err := NewController( + kubeClient, + elaClient, + kubeInformer, + elaInformer, + &rest.Config{}, + ctrl.Config{ + Domains: map[string]*ctrl.LabelSelector{ + defaultDomainSuffix: &ctrl.LabelSelector{}, + prodDomainSuffix: &ctrl.LabelSelector{ + Selector: map[string]string{ + "app": "prod", + }, + }, + }, + }, + testLogger, + &receiver{kubeClient}, + ) + if err != nil { + t.Fatalf("Error creating controller: %v", err) + } + controller = c.(*Controller) + + // Start the informers. This must happen after the call to NewController, + // otherwise there are no informers to be started. + stopCh = make(chan struct{}) + kubeInformer.Start(stopCh) + elaInformer.Start(stopCh) + + // Run the controller. + go func() { + if err := controller.Run(2, stopCh); err != nil { + t.Fatalf("Error running controller: %v", err) + } + }() + + return +} + func TestNewRouteCallsSyncHandler(t *testing.T) { // A standalone revision rev := getTestRevision("test-rev") diff --git a/pkg/controller/route/route.go b/pkg/controller/route/route.go index fe0c0d439a4e..67e98f4d1e67 100644 --- a/pkg/controller/route/route.go +++ b/pkg/controller/route/route.go @@ -1,5 +1,5 @@ /* -Copyright 2018 Google LLC +Copyright 2018 Google LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,67 +17,31 @@ limitations under the License. 
package route import ( - "context" - "errors" "fmt" - "reflect" - "github.com/josephburnett/k8sflag/pkg/k8sflag" - corev1 "k8s.io/api/core/v1" - v1beta1 "k8s.io/api/extensions/v1beta1" - apierrs "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/runtime" - kubeinformers "k8s.io/client-go/informers" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/cache" - - "github.com/knative/serving/pkg/apis/serving" "github.com/knative/serving/pkg/apis/serving/v1alpha1" clientset "github.com/knative/serving/pkg/client/clientset/versioned" informers "github.com/knative/serving/pkg/client/informers/externalversions" listers "github.com/knative/serving/pkg/client/listers/serving/v1alpha1" "github.com/knative/serving/pkg/controller" - "github.com/knative/serving/pkg/logging" - "github.com/knative/serving/pkg/logging/logkey" - "go.opencensus.io/stats" - "go.opencensus.io/tag" "go.uber.org/zap" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/util/runtime" + kubeinformers "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/cache" ) -var ( - processItemCount = stats.Int64( - "controller_route_queue_process_count", - "Counter to keep track of items in the route work queue.", - stats.UnitNone) - statusTagKey tag.Key -) - -const ( - controllerAgentName = "route-controller" -) +const controllerAgentName = "route-controller" -// RevisionRoute represents a single target to route to. -// Basically represents a k8s service representing a specific Revision -// and how much of the traffic goes to it. -type RevisionRoute struct { - // Name for external routing. Optional - Name string - // RevisionName is the underlying revision that we're currently - // routing to. 
Could be resolved from the Configuration or - // specified explicitly in TrafficTarget - RevisionName string - // Service is the name of the k8s service we route to. - // Note we should not put service namespace as a suffix in this field. - Service string - // The k8s service namespace - Namespace string - Weight int +// Receiver defines the interface that a receiver must implement to receive events +// from this Controller. +type Receiver interface { + SyncRoute(*v1alpha1.Route) error } -// Controller implements the controller for Route resources. -// +controller:group=ela,version=v1alpha1,kind=Route,resource=routes +// Controller implements the controller for Route resources type Controller struct { *controller.Base @@ -85,21 +49,10 @@ type Controller struct { lister listers.RouteLister synced cache.InformerSynced - // don't start the workers until configuration cache have been synced - configSynced cache.InformerSynced - - // suffix used to construct Route domain. - controllerConfig controller.Config - - // Autoscale enable scale to zero experiment flag. 
- enableScaleToZero *k8sflag.BoolFlag + receivers []Receiver } -// NewController initializes the controller and is called by the generated code -// Registers eventhandlers to enqueue events -// config - client configuration for talking to the apiserver -// si - informer factory shared across all controllers for listening to events and indexing resource properties -// reconcileKey - function for mapping queue keys to resource names +// NewController creates a new Route controller func NewController( kubeClientSet kubernetes.Interface, elaClientSet clientset.Interface, @@ -107,34 +60,29 @@ func NewController( elaInformerFactory informers.SharedInformerFactory, config *rest.Config, controllerConfig controller.Config, - enableScaleToZero *k8sflag.BoolFlag, - logger *zap.SugaredLogger) controller.Interface { + logger *zap.SugaredLogger, + receivers ...interface{}) (controller.Interface, error) { - // obtain references to a shared index informer for the Routes and - // Configurations type. + // obtain references to a shared index informer for the Route type. 
informer := elaInformerFactory.Serving().V1alpha1().Routes() - configInformer := elaInformerFactory.Serving().V1alpha1().Configurations() - ingressInformer := kubeInformerFactory.Extensions().V1beta1().Ingresses() controller := &Controller{ Base: controller.NewBase(kubeClientSet, elaClientSet, kubeInformerFactory, elaInformerFactory, informer.Informer(), controllerAgentName, "Routes", logger), - lister: informer.Lister(), - synced: informer.Informer().HasSynced, - configSynced: configInformer.Informer().HasSynced, - controllerConfig: controllerConfig, - enableScaleToZero: enableScaleToZero, + lister: informer.Lister(), + synced: informer.Informer().HasSynced, } - controller.Logger.Info("Setting up event handlers") - configInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: controller.addConfigurationEvent, - UpdateFunc: controller.updateConfigurationEvent, - }) - ingressInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - UpdateFunc: controller.updateIngressEvent, - }) - return controller + for _, rcv := range receivers { + if dr, ok := rcv.(Receiver); ok { + controller.receivers = append(controller.receivers, dr) + } + } + if len(controller.receivers) == 0 { + return nil, fmt.Errorf("None of the provided receivers implement route.Receiver. " + + "If the Route controller is no longer needed it should be removed.") + } + return controller, nil } // Run will set up the event handlers for types we are interested in, as well @@ -142,19 +90,14 @@ func NewController( // is closed, at which point it will shutdown the workqueue and wait for // workers to finish processing their current work items. 
func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error { - return c.RunController(threadiness, stopCh, []cache.InformerSynced{c.synced, c.configSynced}, - c.updateRouteEvent, "Route") + return c.RunController(threadiness, stopCh, + []cache.InformerSynced{c.synced}, c.syncHandler, "Route") } -// loggerWithRouteInfo enriches the logs with route name and namespace. -func loggerWithRouteInfo(logger *zap.SugaredLogger, ns string, name string) *zap.SugaredLogger { - return logger.With(zap.String(logkey.Namespace, ns), zap.String(logkey.Route, name)) -} - -// updateRouteEvent compares the actual state with the desired, and attempts to -// converge the two. It then updates the Status block of the Route resource -// with the current status of the resource. -func (c *Controller) updateRouteEvent(key string) error { +// syncHandler compares the actual state with the desired, and attempts to +// converge the two. It then updates the Status block of the Route +// resource with the current status of the resource. +func (c *Controller) syncHandler(key string) error { // Convert the namespace/name string into a distinct namespace and name namespace, name, err := cache.SplitMetaNamespaceKey(key) if err != nil { @@ -162,19 +105,12 @@ func (c *Controller) updateRouteEvent(key string) error { return nil } - logger := loggerWithRouteInfo(c.Logger, namespace, name) - ctx := logging.WithLogger(context.TODO(), logger) - // Get the Route resource with this namespace/name - route, err := c.lister.Routes(namespace).Get(name) - - // Don't modify the informers copy - route = route.DeepCopy() - + original, err := c.lister.Routes(namespace).Get(name) if err != nil { // The resource may no longer exist, in which case we stop // processing. 
- if apierrs.IsNotFound(err) { + if errors.IsNotFound(err) { runtime.HandleError(fmt.Errorf("route %q in work queue no longer exists", key)) return nil } @@ -182,775 +118,12 @@ func (c *Controller) updateRouteEvent(key string) error { return err } - logger.Infof("Reconciling route :%v", route) - - // Create a placeholder service that is simply used by istio as a placeholder. - // This service could eventually be the 'activator' service that will get all the - // fallthrough traffic if there are no route rules (revisions to target). - // This is one way to implement the 0->1. For now, we'll just create a placeholder - // that selects nothing. - logger.Info("Creating/Updating placeholder k8s services") - - if err = c.reconcilePlaceholderService(ctx, route); err != nil { - return err - } - - // Call syncTrafficTargetsAndUpdateRouteStatus, which also updates the Route.Status - // to contain the domain we will use for Ingress creation. - - if _, err = c.syncTrafficTargetsAndUpdateRouteStatus(ctx, route); err != nil { - return err - } - - // Then create or update the Ingress rule for this service - logger.Info("Creating or updating ingress rule") - if err = c.reconcileIngress(ctx, route); err != nil { - logger.Error("Failed to create or update ingress rule", zap.Error(err)) - return err - } - - logger.Info("Route successfully synced") - return nil -} - -func (c *Controller) routeDomain(route *v1alpha1.Route) string { - domain := c.controllerConfig.LookupDomainForLabels(route.ObjectMeta.Labels) - return fmt.Sprintf("%s.%s.%s", route.Name, route.Namespace, domain) -} - -// syncTrafficTargetsAndUpdateRouteStatus attempts to converge the actual state and desired state -// according to the traffic targets in Spec field for Route resource. It then updates the Status -// block of the Route and returns the updated one. 
-func (c *Controller) syncTrafficTargetsAndUpdateRouteStatus(ctx context.Context, route *v1alpha1.Route) (*v1alpha1.Route, error) { - logger := logging.FromContext(ctx) - c.consolidateTrafficTargets(ctx, route) - configMap, revMap, err := c.getDirectTrafficTargets(ctx, route) - if err != nil { - return nil, err - } - if err := c.extendConfigurationsWithIndirectTrafficTargets(ctx, route, configMap, revMap); err != nil { - return nil, err - } - if err := c.extendRevisionsWithIndirectTrafficTargets(ctx, route, configMap, revMap); err != nil { - return nil, err - } - - if err := c.deleteLabelForOutsideOfGivenConfigurations(ctx, route, configMap); err != nil { - return nil, err - } - if err := c.setLabelForGivenConfigurations(ctx, route, configMap); err != nil { - return nil, err - } - - if err := c.deleteLabelForOutsideOfGivenRevisions(ctx, route, revMap); err != nil { - return nil, err - } - if err := c.setLabelForGivenRevisions(ctx, route, revMap); err != nil { - return nil, err - } - - // Then create the actual route rules. 
- logger.Info("Creating Istio route rules") - revisionRoutes, err := c.createOrUpdateRouteRules(ctx, route, configMap, revMap) - if err != nil { - logger.Error("Failed to create routes", zap.Error(err)) - return nil, err - } - - // If revision routes were configured, update them - if revisionRoutes != nil { - traffic := []v1alpha1.TrafficTarget{} - for _, r := range revisionRoutes { - traffic = append(traffic, v1alpha1.TrafficTarget{ - Name: r.Name, - RevisionName: r.RevisionName, - Percent: r.Weight, - }) - } - route.Status.Traffic = traffic - } - route.Status.Domain = c.routeDomain(route) - updated, err := c.updateStatus(route) - if err != nil { - logger.Warn("Failed to update service status", zap.Error(err)) - c.Recorder.Eventf(route, corev1.EventTypeWarning, "UpdateFailed", "Failed to update status for route %q: %v", route.Name, err) - return nil, err - } - c.Recorder.Eventf(route, corev1.EventTypeNormal, "Updated", "Updated status for route %q", route.Name) - return updated, nil -} - -func (c *Controller) reconcilePlaceholderService(ctx context.Context, route *v1alpha1.Route) error { - logger := logging.FromContext(ctx) - service := MakeRouteK8SService(route) - if _, err := c.KubeClientSet.Core().Services(route.Namespace).Create(service); err != nil { - if apierrs.IsAlreadyExists(err) { - // Service already exist. 
- return nil - } - logger.Error("Failed to create service", zap.Error(err)) - c.Recorder.Eventf(route, corev1.EventTypeWarning, "CreationFailed", "Failed to create service %q: %v", service.Name, err) - return err - } - logger.Infof("Created service %s", service.Name) - c.Recorder.Eventf(route, corev1.EventTypeNormal, "Created", "Created service %q", service.Name) - return nil -} - -func (c *Controller) reconcileIngress(ctx context.Context, route *v1alpha1.Route) error { - logger := logging.FromContext(ctx) - ingressNamespace := route.Namespace - ingressName := controller.GetElaK8SIngressName(route) - ingress := MakeRouteIngress(route) - ingressClient := c.KubeClientSet.Extensions().Ingresses(ingressNamespace) - existing, err := ingressClient.Get(ingressName, metav1.GetOptions{}) - - if err != nil { - if apierrs.IsNotFound(err) { - if _, err = ingressClient.Create(ingress); err == nil { - logger.Infof("Created ingress %q in namespace %q", ingressName, ingressNamespace) - c.Recorder.Eventf(route, corev1.EventTypeNormal, "Created", "Created Ingress %q in namespace %q", ingressName, ingressNamespace) - } - } - return err - } - // Check if there is anything to update. 
- if !reflect.DeepEqual(existing.Spec, ingress.Spec) { - existing.Spec = ingress.Spec - if _, err = ingressClient.Update(existing); err == nil { - logger.Infof("Updated ingress %q in namespace %q", ingressName, ingressNamespace) - c.Recorder.Eventf(route, corev1.EventTypeNormal, "Updated", "Updated Ingress %q in namespace %q", ingressName, ingressNamespace) - } - return err - } - return nil -} - -func (c *Controller) getDirectTrafficTargets(ctx context.Context, route *v1alpha1.Route) ( - map[string]*v1alpha1.Configuration, map[string]*v1alpha1.Revision, error) { - logger := logging.FromContext(ctx) - ns := route.Namespace - configClient := c.ElaClientSet.ServingV1alpha1().Configurations(ns) - revClient := c.ElaClientSet.ServingV1alpha1().Revisions(ns) - configMap := map[string]*v1alpha1.Configuration{} - revMap := map[string]*v1alpha1.Revision{} - - for _, tt := range route.Spec.Traffic { - if tt.ConfigurationName != "" { - configName := tt.ConfigurationName - config, err := configClient.Get(configName, metav1.GetOptions{}) - if err != nil { - logger.Infof("Failed to fetch Configuration %q: %v", configName, err) - return nil, nil, err - } - configMap[configName] = config - } else { - revName := tt.RevisionName - rev, err := revClient.Get(revName, metav1.GetOptions{}) - if err != nil { - logger.Infof("Failed to fetch Revision %q: %v", revName, err) - return nil, nil, err - } - revMap[revName] = rev - } - } - - return configMap, revMap, nil -} - -func (c *Controller) extendConfigurationsWithIndirectTrafficTargets( - ctx context.Context, route *v1alpha1.Route, configMap map[string]*v1alpha1.Configuration, - revMap map[string]*v1alpha1.Revision) error { - logger := logging.FromContext(ctx) - ns := route.Namespace - configClient := c.ElaClientSet.ServingV1alpha1().Configurations(ns) - - // Get indirect configurations. 
- for _, rev := range revMap { - if configName, ok := rev.Labels[serving.ConfigurationLabelKey]; ok { - if _, ok := configMap[configName]; !ok { - // This is not a duplicated configuration - config, err := configClient.Get(configName, metav1.GetOptions{}) - if err != nil { - logger.Errorf("Failed to fetch Configuration %s: %s", configName, err) - return err - } - configMap[configName] = config - } - } else { - logger.Infof("Revision %s does not have label %s", rev.Name, serving.ConfigurationLabelKey) - } - } - - return nil -} - -func (c *Controller) extendRevisionsWithIndirectTrafficTargets( - ctx context.Context, route *v1alpha1.Route, configMap map[string]*v1alpha1.Configuration, - revMap map[string]*v1alpha1.Revision) error { - logger := logging.FromContext(ctx) - ns := route.Namespace - revisionClient := c.ElaClientSet.ServingV1alpha1().Revisions(ns) - - for _, tt := range route.Spec.Traffic { - if tt.ConfigurationName != "" { - configName := tt.ConfigurationName - if config, ok := configMap[configName]; ok { - - revName := config.Status.LatestReadyRevisionName - if revName == "" { - logger.Infof("Configuration %s is not ready. 
Skipping Configuration %q during route reconcile", - tt.ConfigurationName) - continue - } - // Check if it is a duplicated revision - if _, ok := revMap[revName]; !ok { - rev, err := revisionClient.Get(revName, metav1.GetOptions{}) - if err != nil { - logger.Errorf("Failed to fetch Revision %s: %s", revName, err) - return err - } - revMap[revName] = rev - } - } - } - } - - return nil -} - -func (c *Controller) setLabelForGivenConfigurations( - ctx context.Context, route *v1alpha1.Route, configMap map[string]*v1alpha1.Configuration) error { - logger := logging.FromContext(ctx) - configClient := c.ElaClientSet.ServingV1alpha1().Configurations(route.Namespace) - - // Validate - for _, config := range configMap { - if routeName, ok := config.Labels[serving.RouteLabelKey]; ok { - // TODO(yanweiguo): add a condition in status for this error - if routeName != route.Name { - errMsg := fmt.Sprintf("Configuration %q is already in use by %q, and cannot be used by %q", - config.Name, routeName, route.Name) - c.Recorder.Event(route, corev1.EventTypeWarning, "ConfigurationInUse", errMsg) - logger.Error(errMsg) - return errors.New(errMsg) - } - } - } - - // Set label for newly added configurations as traffic target. - for _, config := range configMap { - if config.Labels == nil { - config.Labels = make(map[string]string) - } else if _, ok := config.Labels[serving.RouteLabelKey]; ok { - continue - } - config.Labels[serving.RouteLabelKey] = route.Name - if _, err := configClient.Update(config); err != nil { - logger.Errorf("Failed to update Configuration %s: %s", config.Name, err) + for _, dr := range c.receivers { + // Don't modify the informer's copy, and give each receiver a fresh copy. 
+ cp := original.DeepCopy() + if err := dr.SyncRoute(cp); err != nil { return err } } - return nil } - -func (c *Controller) setLabelForGivenRevisions( - ctx context.Context, route *v1alpha1.Route, revMap map[string]*v1alpha1.Revision) error { - logger := logging.FromContext(ctx) - revisionClient := c.ElaClientSet.ServingV1alpha1().Revisions(route.Namespace) - - // Validate revision if it already has a route label - for _, rev := range revMap { - if routeName, ok := rev.Labels[serving.RouteLabelKey]; ok { - if routeName != route.Name { - errMsg := fmt.Sprintf("Revision %q is already in use by %q, and cannot be used by %q", - rev.Name, routeName, route.Name) - c.Recorder.Event(route, corev1.EventTypeWarning, "RevisionInUse", errMsg) - logger.Error(errMsg) - return errors.New(errMsg) - } - } - } - - for _, rev := range revMap { - if rev.Labels == nil { - rev.Labels = make(map[string]string) - } else if _, ok := rev.Labels[serving.RouteLabelKey]; ok { - continue - } - rev.Labels[serving.RouteLabelKey] = route.Name - if _, err := revisionClient.Update(rev); err != nil { - logger.Errorf("Failed to add route label to Revision %s: %s", rev.Name, err) - return err - } - } - - return nil -} - -func (c *Controller) deleteLabelForOutsideOfGivenConfigurations( - ctx context.Context, route *v1alpha1.Route, configMap map[string]*v1alpha1.Configuration) error { - logger := logging.FromContext(ctx) - configClient := c.ElaClientSet.ServingV1alpha1().Configurations(route.Namespace) - // Get Configurations set as traffic target before this sync. - oldConfigsList, err := configClient.List( - metav1.ListOptions{ - LabelSelector: fmt.Sprintf("%s=%s", serving.RouteLabelKey, route.Name), - }, - ) - if err != nil { - logger.Errorf("Failed to fetch configurations with label '%s=%s': %s", - serving.RouteLabelKey, route.Name, err) - return err - } - - // Delete label for newly removed configurations as traffic target. 
- for _, config := range oldConfigsList.Items { - if _, ok := configMap[config.Name]; !ok { - delete(config.Labels, serving.RouteLabelKey) - if _, err := configClient.Update(&config); err != nil { - logger.Errorf("Failed to update Configuration %s: %s", config.Name, err) - return err - } - } - } - - return nil -} - -func (c *Controller) deleteLabelForOutsideOfGivenRevisions( - ctx context.Context, route *v1alpha1.Route, revMap map[string]*v1alpha1.Revision) error { - logger := logging.FromContext(ctx) - revClient := c.ElaClientSet.ServingV1alpha1().Revisions(route.Namespace) - - oldRevList, err := revClient.List( - metav1.ListOptions{ - LabelSelector: fmt.Sprintf("%s=%s", serving.RouteLabelKey, route.Name), - }, - ) - if err != nil { - logger.Errorf("Failed to fetch revisions with label '%s=%s': %s", - serving.RouteLabelKey, route.Name, err) - return err - } - - // Delete label for newly removed revisions as traffic target. - for _, rev := range oldRevList.Items { - if _, ok := revMap[rev.Name]; !ok { - delete(rev.Labels, serving.RouteLabelKey) - if _, err := revClient.Update(&rev); err != nil { - logger.Errorf("Failed to remove route label from Revision %s: %s", rev.Name, err) - return err - } - } - } - - return nil -} - -// computeRevisionRoutes computes RevisionRoute for a route object. If there is one or more inactive revisions and enableScaleToZero -// is true, a route rule with the activator service as the destination will be added. It returns the revision routes, the inactive -// revision name to which the activator should forward requests to, and error if there is any. 
-func (c *Controller) computeRevisionRoutes( - ctx context.Context, route *v1alpha1.Route, configMap map[string]*v1alpha1.Configuration, - revMap map[string]*v1alpha1.Revision) ([]RevisionRoute, string, error) { - - logger := logging.FromContext(ctx) - logger.Debug("Figuring out routes") - enableScaleToZero := c.enableScaleToZero.Get() - // The inactive revision name which has the largest traffic weight. - inactiveRev := "" - // The max percent in all inactive revisions. - maxInactivePercent := 0 - // The total percent of all inactive revisions. - totalInactivePercent := 0 - ns := route.Namespace - elaNS := controller.GetElaNamespaceName(ns) - ret := []RevisionRoute{} - - for _, tt := range route.Spec.Traffic { - var rev *v1alpha1.Revision - var err error - revName := tt.RevisionName - if tt.ConfigurationName != "" { - // Get the configuration's LatestReadyRevisionName - revName = configMap[tt.ConfigurationName].Status.LatestReadyRevisionName - if revName == "" { - logger.Errorf("Configuration %s is not ready. Should skip updating route rules", - tt.ConfigurationName) - return nil, "", nil - } - // Revision has been already fetched indirectly in extendRevisionsWithIndirectTrafficTargets - rev = revMap[revName] - - } else { - // Direct revision has already been fetched - rev = revMap[revName] - } - //TODO(grantr): What should happen if revisionName is empty? - - if rev == nil { - // For safety, which should never happen. - logger.Errorf("Failed to fetch Revision %s: %s", revName, err) - return nil, "", err - } - - hasRouteRule := true - cond := rev.Status.GetCondition(v1alpha1.RevisionConditionReady) - if enableScaleToZero && cond != nil { - // A revision is considered inactive (yet) if it's in - // "Inactive" condition or "Activating" condition. 
- if (cond.Reason == "Inactive" && cond.Status == corev1.ConditionFalse) || - (cond.Reason == "Activating" && cond.Status == corev1.ConditionUnknown) { - // Let inactiveRev be the Reserve revision with the largest traffic weight. - if tt.Percent > maxInactivePercent { - maxInactivePercent = tt.Percent - inactiveRev = rev.Name - } - totalInactivePercent += tt.Percent - hasRouteRule = false - } - } - - if hasRouteRule { - rr := RevisionRoute{ - Name: tt.Name, - RevisionName: rev.Name, - Service: rev.Status.ServiceName, - Namespace: elaNS, - Weight: tt.Percent, - } - ret = append(ret, rr) - } - } - - // TODO: The ideal solution is to append different revision name as headers for each inactive revision. - // https://github.com/knative/serving/issues/882 - if totalInactivePercent > 0 { - activatorRoute := RevisionRoute{ - Name: controller.GetElaK8SActivatorServiceName(), - RevisionName: inactiveRev, - Service: controller.GetElaK8SActivatorServiceName(), - Namespace: controller.GetElaK8SActivatorNamespace(), - Weight: totalInactivePercent, - } - ret = append(ret, activatorRoute) - } - return ret, inactiveRev, nil -} - -// computeEmptyRevisionRoutes is a hack to work around https://github.com/istio/istio/issues/5204. -// Here we add empty/dummy route rules for non-target revisions to prepare to switch traffic to -// them in the future. We are tracking this issue in https://github.com/knative/serving/issues/348. -// -// TODO: Even though this fixes the 503s when switching revisions, revisions will have empty route -// rules to them for perpetuity, therefore not ideal. We should remove this hack once -// https://github.com/istio/istio/issues/5204 is fixed, probably in 0.8.1. 
-func (c *Controller) computeEmptyRevisionRoutes( - ctx context.Context, route *v1alpha1.Route, configMap map[string]*v1alpha1.Configuration, - revMap map[string]*v1alpha1.Revision) ([]RevisionRoute, error) { - logger := logging.FromContext(ctx) - ns := route.Namespace - elaNS := controller.GetElaNamespaceName(ns) - revClient := c.ElaClientSet.ServingV1alpha1().Revisions(ns) - revRoutes := []RevisionRoute{} - for _, tt := range route.Spec.Traffic { - configName := tt.ConfigurationName - if configName != "" { - // Get the configuration's LatestReadyRevisionName - latestReadyRevName := configMap[configName].Status.LatestReadyRevisionName - revs, err := revClient.List(metav1.ListOptions{ - LabelSelector: fmt.Sprintf("%s=%s", serving.ConfigurationLabelKey, configName), - }) - if err != nil { - logger.Errorf("Failed to fetch revisions of Configuration %q: %s", configName, err) - return nil, err - } - for _, rev := range revs.Items { - if _, ok := revMap[rev.Name]; !ok && rev.Name != latestReadyRevName { - // This is a non-target revision. Adding a dummy rule to make Istio happy, - // so that if we switch traffic to them later we won't cause Istio to - // throw spurious 503s. See https://github.com/istio/istio/issues/5204. - revRoutes = append(revRoutes, RevisionRoute{ - RevisionName: rev.Name, - Service: rev.Status.ServiceName, - Namespace: elaNS, - Weight: 0, - }) - } - } - } - } - return revRoutes, nil -} - -func (c *Controller) createOrUpdateRouteRules(ctx context.Context, route *v1alpha1.Route, - configMap map[string]*v1alpha1.Configuration, revMap map[string]*v1alpha1.Revision) ([]RevisionRoute, error) { - logger := logging.FromContext(ctx) - // grab a client that's specific to RouteRule. 
- ns := route.Namespace - routeClient := c.ElaClientSet.ConfigV1alpha2().RouteRules(ns) - if routeClient == nil { - logger.Errorf("Failed to create resource client") - return nil, fmt.Errorf("Couldn't get a routeClient") - } - - revisionRoutes, inactiveRev, err := c.computeRevisionRoutes(ctx, route, configMap, revMap) - if err != nil { - logger.Errorf("Failed to get routes for %s : %q", route.Name, err) - return nil, err - } - if len(revisionRoutes) == 0 { - logger.Errorf("No routes were found for the service %q", route.Name) - return nil, nil - } - - // TODO: remove this once https://github.com/istio/istio/issues/5204 is fixed. - emptyRoutes, err := c.computeEmptyRevisionRoutes(ctx, route, configMap, revMap) - if err != nil { - logger.Errorf("Failed to get empty routes for %s : %q", route.Name, err) - return nil, err - } - revisionRoutes = append(revisionRoutes, emptyRoutes...) - // Create route rule for the route domain - routeRuleName := controller.GetRouteRuleName(route, nil) - routeRules, err := routeClient.Get(routeRuleName, metav1.GetOptions{}) - if err != nil { - if !apierrs.IsNotFound(err) { - return nil, err - } - routeRules = MakeIstioRoutes(route, nil, ns, revisionRoutes, c.routeDomain(route), inactiveRev) - if _, err := routeClient.Create(routeRules); err != nil { - c.Recorder.Eventf(route, corev1.EventTypeWarning, "CreationFailed", "Failed to create Istio route rule %q: %s", routeRules.Name, err) - return nil, err - } - c.Recorder.Eventf(route, corev1.EventTypeNormal, "Created", "Created Istio route rule %q", routeRules.Name) - } else { - routeRules.Spec = makeIstioRouteSpec(route, nil, ns, revisionRoutes, c.routeDomain(route), inactiveRev) - if _, err := routeClient.Update(routeRules); err != nil { - c.Recorder.Eventf(route, corev1.EventTypeWarning, "UpdateFailed", "Failed to update Istio route rule %q: %s", routeRules.Name, err) - return nil, err - } - c.Recorder.Eventf(route, corev1.EventTypeNormal, "Updated", "Updated Istio route rule %q", 
routeRules.Name) - } - - // Create route rule for named traffic targets - for _, tt := range route.Spec.Traffic { - if tt.Name == "" { - continue - } - routeRuleName := controller.GetRouteRuleName(route, &tt) - routeRules, err := routeClient.Get(routeRuleName, metav1.GetOptions{}) - if err != nil { - if !apierrs.IsNotFound(err) { - return nil, err - } - routeRules = MakeIstioRoutes(route, &tt, ns, revisionRoutes, c.routeDomain(route), inactiveRev) - if _, err := routeClient.Create(routeRules); err != nil { - c.Recorder.Eventf(route, corev1.EventTypeWarning, "CreationFailed", "Failed to create Istio route rule %q: %s", routeRules.Name, err) - return nil, err - } - c.Recorder.Eventf(route, corev1.EventTypeNormal, "Created", "Created Istio route rule %q", routeRules.Name) - } else { - routeRules.Spec = makeIstioRouteSpec(route, &tt, ns, revisionRoutes, c.routeDomain(route), inactiveRev) - if _, err := routeClient.Update(routeRules); err != nil { - return nil, err - } - c.Recorder.Eventf(route, corev1.EventTypeNormal, "Updated", "Updated Istio route rule %q", routeRules.Name) - } - } - if err := c.removeOutdatedRouteRules(ctx, route); err != nil { - return nil, err - } - return revisionRoutes, nil -} - -func (c *Controller) updateStatus(route *v1alpha1.Route) (*v1alpha1.Route, error) { - routeClient := c.ElaClientSet.ServingV1alpha1().Routes(route.Namespace) - existing, err := routeClient.Get(route.Name, metav1.GetOptions{}) - if err != nil { - return nil, err - } - // Check if there is anything to update. - if !reflect.DeepEqual(existing.Status, route.Status) { - existing.Status = route.Status - // TODO: for CRD there's no updatestatus, so use normal update. - return routeClient.Update(existing) - } - return existing, nil -} - -// consolidateTrafficTargets will consolidate all duplicate revisions -// and configurations. If the traffic target names are unique, the traffic -// targets will not be consolidated. 
-func (c *Controller) consolidateTrafficTargets(ctx context.Context, route *v1alpha1.Route) { - type trafficTarget struct { - name string - revisionName string - configurationName string - } - - logger := logging.FromContext(ctx) - logger.Infof("Attempting to consolidate traffic targets") - trafficTargets := route.Spec.Traffic - trafficMap := make(map[trafficTarget]int) - var order []trafficTarget - - for _, t := range trafficTargets { - tt := trafficTarget{ - name: t.Name, - revisionName: t.RevisionName, - configurationName: t.ConfigurationName, - } - if trafficMap[tt] != 0 { - logger.Infof( - "Found duplicate traffic targets (name: %s, revision: %s, configuration:%s), consolidating traffic", - tt.name, - tt.revisionName, - tt.configurationName, - ) - trafficMap[tt] += t.Percent - } else { - trafficMap[tt] = t.Percent - // The order to walk the map (Go randomizes otherwise) - order = append(order, tt) - } - } - - consolidatedTraffic := []v1alpha1.TrafficTarget{} - for _, tt := range order { - p := trafficMap[tt] - consolidatedTraffic = append( - consolidatedTraffic, - v1alpha1.TrafficTarget{ - Name: tt.name, - ConfigurationName: tt.configurationName, - RevisionName: tt.revisionName, - Percent: p, - }, - ) - } - route.Spec.Traffic = consolidatedTraffic -} - -func (c *Controller) removeOutdatedRouteRules(ctx context.Context, u *v1alpha1.Route) error { - logger := logging.FromContext(ctx) - ns := u.Namespace - routeClient := c.ElaClientSet.ConfigV1alpha2().RouteRules(ns) - if routeClient == nil { - logger.Error("Failed to create resource client") - return errors.New("Couldn't get a routeClient") - } - - routeRuleList, err := routeClient.List(metav1.ListOptions{ - LabelSelector: fmt.Sprintf("route=%s", u.Name), - }) - if err != nil { - return err - } - - routeRuleNames := map[string]struct{}{} - routeRuleNames[controller.GetRouteRuleName(u, nil)] = struct{}{} - for _, tt := range u.Spec.Traffic { - if tt.Name == "" { - continue - } - 
routeRuleNames[controller.GetRouteRuleName(u, &tt)] = struct{}{} - } - - for _, r := range routeRuleList.Items { - if _, ok := routeRuleNames[r.Name]; ok { - continue - } - logger.Infof("Deleting outdated route: %s", r.Name) - if err := routeClient.Delete(r.Name, nil); err != nil { - if !apierrs.IsNotFound(err) { - return err - } - } - } - return nil -} - -func (c *Controller) addConfigurationEvent(obj interface{}) { - config := obj.(*v1alpha1.Configuration) - configName := config.Name - ns := config.Namespace - - if config.Status.LatestReadyRevisionName == "" { - c.Logger.Infof("Configuration %s is not ready", configName) - return - } - - routeName, ok := config.Labels[serving.RouteLabelKey] - if !ok { - c.Logger.Infof("Configuration %s does not have label %s", configName, serving.RouteLabelKey) - return - } - - logger := loggerWithRouteInfo(c.Logger, ns, routeName) - ctx := logging.WithLogger(context.TODO(), logger) - route, err := c.lister.Routes(ns).Get(routeName) - if err != nil { - logger.Error("Error fetching route upon configuration becoming ready", zap.Error(err)) - return - } - - // Don't modify the informers copy - route = route.DeepCopy() - if _, err := c.syncTrafficTargetsAndUpdateRouteStatus(ctx, route); err != nil { - logger.Error("Error updating route upon configuration becoming ready", zap.Error(err)) - } -} - -func (c *Controller) updateConfigurationEvent(old, new interface{}) { - c.addConfigurationEvent(new) -} - -func (c *Controller) updateIngressEvent(old, new interface{}) { - ingress := new.(*v1beta1.Ingress) - // If ingress isn't owned by a route, no route update is required. - routeName := controller.LookupOwningRouteName(ingress.OwnerReferences) - if routeName == "" { - return - } - if len(ingress.Status.LoadBalancer.Ingress) == 0 { - // Route isn't ready if having no load-balancer ingress. 
- return - } - for _, i := range ingress.Status.LoadBalancer.Ingress { - if i.IP == "" { - // Route isn't ready if some load balancer ingress doesn't have an IP. - return - } - } - ns := ingress.Namespace - routeClient := c.ElaClientSet.ServingV1alpha1().Routes(ns) - route, err := routeClient.Get(routeName, metav1.GetOptions{}) - if err != nil { - c.Logger.Error( - "Error fetching route upon ingress becoming", - zap.Error(err), - zap.String(logkey.Namespace, ns), - zap.String(logkey.Route, routeName)) - return - } - if route.Status.IsReady() { - return - } - // Mark route as ready. - route.Status.SetCondition(&v1alpha1.RouteCondition{ - Type: v1alpha1.RouteConditionReady, - Status: corev1.ConditionTrue, - }) - - if _, err = routeClient.Update(route); err != nil { - c.Logger.Error( - "Error updating readiness of route upon ingress becoming", - zap.Error(err), - zap.String(logkey.Namespace, ns), - zap.String(logkey.Route, routeName)) - return - } -} diff --git a/pkg/controller/service/service.go b/pkg/controller/service/service.go index f5b8a908a9b8..afb93a30a429 100644 --- a/pkg/controller/service/service.go +++ b/pkg/controller/service/service.go @@ -1,5 +1,5 @@ /* -Copyright 2018 Google LLC +Copyright 2018 Google LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -18,63 +18,41 @@ package service import ( "fmt" - "reflect" + "github.com/knative/serving/pkg/apis/serving/v1alpha1" + clientset "github.com/knative/serving/pkg/client/clientset/versioned" + informers "github.com/knative/serving/pkg/client/informers/externalversions" + listers "github.com/knative/serving/pkg/client/listers/serving/v1alpha1" + "github.com/knative/serving/pkg/controller" "go.uber.org/zap" - apierrs "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/util/runtime" kubeinformers "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "k8s.io/client-go/tools/cache" - "k8s.io/client-go/tools/record" - "k8s.io/client-go/util/workqueue" - - "github.com/knative/serving/pkg/apis/serving/v1alpha1" - clientset "github.com/knative/serving/pkg/client/clientset/versioned" - informers "github.com/knative/serving/pkg/client/informers/externalversions" - listers "github.com/knative/serving/pkg/client/listers/serving/v1alpha1" - "github.com/knative/serving/pkg/controller" - "github.com/knative/serving/pkg/logging/logkey" - "go.opencensus.io/stats" - "go.opencensus.io/tag" ) -var ( - processItemCount = stats.Int64( - "controller_service_queue_process_count", - "Counter to keep track of items in the service work queue", - stats.UnitNone) - statusTagKey tag.Key -) +const controllerAgentName = "service-controller" -const ( - controllerAgentName = "service-controller" -) +// Receiver defines the interface that a receiver must implement to receive events +// from this Controller. +type Receiver interface { + SyncService(*v1alpha1.Service) error +} -// Controller implements the controller for Service resources. 
-// +controller:group=ela,version=v1alpha1,kind=Service,resource=services +// Controller implements the controller for Service resources type Controller struct { *controller.Base - // lister indexes properties about Services + // lister indexes properties about Service lister listers.ServiceLister synced cache.InformerSynced - // workqueue is a rate limited work queue. This is used to queue work to be - // processed instead of performing it as soon as a change happens. This - // means we can ensure we only process a fixed amount of resources at a - // time, and makes it easy to ensure we are never processing the same item - // simultaneously in two different workers. - workqueue workqueue.RateLimitingInterface - // recorder is an event recorder for recording Event resources to the - // Kubernetes API. - recorder record.EventRecorder + receivers []Receiver } -// NewController initializes the controller and is called by the generated code -// Registers eventhandlers to enqueue events +// NewController creates a new Service controller func NewController( kubeClientSet kubernetes.Interface, elaClientSet clientset.Interface, @@ -82,19 +60,29 @@ func NewController( elaInformerFactory informers.SharedInformerFactory, config *rest.Config, controllerConfig controller.Config, - logger *zap.SugaredLogger) controller.Interface { + logger *zap.SugaredLogger, + receivers ...interface{}) (controller.Interface, error) { - // obtain references to a shared index informer for the Services. + // obtain references to a shared index informer for the Service type. 
informer := elaInformerFactory.Serving().V1alpha1().Services() controller := &Controller{ Base: controller.NewBase(kubeClientSet, elaClientSet, kubeInformerFactory, - elaInformerFactory, informer.Informer(), controllerAgentName, "Revisions", logger), + elaInformerFactory, informer.Informer(), controllerAgentName, "Services", logger), lister: informer.Lister(), synced: informer.Informer().HasSynced, } - return controller + for _, rcv := range receivers { + if dr, ok := rcv.(Receiver); ok { + controller.receivers = append(controller.receivers, dr) + } + } + if len(controller.receivers) == 0 { + return nil, fmt.Errorf("None of the provided receivers implement service.Receiver. " + + "If the Service controller is no longer needed it should be removed.") + } + return controller, nil } // Run will set up the event handlers for types we are interested in, as well @@ -102,19 +90,14 @@ func NewController( // is closed, at which point it will shutdown the workqueue and wait for // workers to finish processing their current work items. func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error { - return c.RunController(threadiness, stopCh, []cache.InformerSynced{c.synced}, - c.updateServiceEvent, "Service") + return c.RunController(threadiness, stopCh, + []cache.InformerSynced{c.synced}, c.syncHandler, "Service") } -// loggerWithServiceInfo enriches the logs with service name and namespace. -func loggerWithServiceInfo(logger *zap.SugaredLogger, ns string, name string) *zap.SugaredLogger { - return logger.With(zap.String(logkey.Namespace, ns), zap.String(logkey.Service, name)) -} - -// updateServiceEvent compares the actual state with the desired, and attempts to -// converge the two. It then updates the Status block of the Service resource -// with the current status of the resource. -func (c *Controller) updateServiceEvent(key string) error { +// syncHandler compares the actual state with the desired, and attempts to +// converge the two. 
It then updates the Status block of the Service +// resource with the current status of the resource. +func (c *Controller) syncHandler(key string) error { // Convert the namespace/name string into a distinct namespace and name namespace, name, err := cache.SplitMetaNamespaceKey(key) if err != nil { @@ -122,14 +105,12 @@ func (c *Controller) updateServiceEvent(key string) error { return nil } - logger := loggerWithServiceInfo(c.Logger, namespace, name) - // Get the Service resource with this namespace/name - service, err := c.lister.Services(namespace).Get(name) + original, err := c.lister.Services(namespace).Get(name) if err != nil { // The resource may no longer exist, in which case we stop // processing. - if apierrs.IsNotFound(err) { + if errors.IsNotFound(err) { runtime.HandleError(fmt.Errorf("service %q in work queue no longer exists", key)) return nil } @@ -137,99 +118,12 @@ func (c *Controller) updateServiceEvent(key string) error { return err } - // Don't modify the informers copy - service = service.DeepCopy() - - // We added the Generation to avoid fighting the Configuration controller, - // which adds a Generation to avoid fighting the Revision controller. We - // shouldn't need this once k8s 1.10 lands, see: - // https://github.com/kubernetes/kubernetes/issues/58778 - // TODO(#642): Remove this. - if service.GetGeneration() == service.Status.ObservedGeneration { - logger.Infof("Skipping reconcile since already reconciled %d == %d", - service.Spec.Generation, service.Status.ObservedGeneration) - return nil - } - - logger.Infof("Running reconcile Service for %s\n%+v\n", service.Name, service) - - config := MakeServiceConfiguration(service) - if err := c.reconcileConfiguration(config); err != nil { - logger.Errorf("Failed to update Configuration for %q: %v", service.Name, err) - return err - } - - // TODO: If revision is specified, check that the revision is ready before - // switching routes to it. Though route controller might just do the right thing? 
- - route := MakeServiceRoute(service, config.Name) - if err := c.reconcileRoute(route); err != nil { - logger.Errorf("Failed to update Route for %q: %v", service.Name, err) - return err - } - - // Update the Status of the Service with the latest generation that - // we just reconciled against so we don't keep generating Revisions. - // TODO(#642): Remove this. - service.Status.ObservedGeneration = service.Spec.Generation - - logger.Infof("Updating the Service status:\n%+v", service) - - if _, err := c.updateStatus(service); err != nil { - logger.Errorf("Failed to update Service %q: %v", service.Name, err) - return err - } - - return nil -} - -func (c *Controller) updateStatus(service *v1alpha1.Service) (*v1alpha1.Service, error) { - serviceClient := c.ElaClientSet.ServingV1alpha1().Services(service.Namespace) - existing, err := serviceClient.Get(service.Name, metav1.GetOptions{}) - if err != nil { - return nil, err - } - // Check if there is anything to update. - if !reflect.DeepEqual(existing.Status, service.Status) { - existing.Status = service.Status - // TODO: for CRD there's no updatestatus, so use normal update. - return serviceClient.Update(existing) - } - return existing, nil -} - -func (c *Controller) reconcileConfiguration(config *v1alpha1.Configuration) error { - configClient := c.ElaClientSet.ServingV1alpha1().Configurations(config.Namespace) - - existing, err := configClient.Get(config.Name, metav1.GetOptions{}) - if err != nil { - if apierrs.IsNotFound(err) { - _, err := configClient.Create(config) - return err - } - return err - } - // TODO(vaikas): Perhaps only update if there are actual changes. 
- copy := existing.DeepCopy() - copy.Spec = config.Spec - _, err = configClient.Update(copy) - return err -} - -func (c *Controller) reconcileRoute(route *v1alpha1.Route) error { - routeClient := c.ElaClientSet.ServingV1alpha1().Routes(route.Namespace) - - existing, err := routeClient.Get(route.Name, metav1.GetOptions{}) - if err != nil { - if apierrs.IsNotFound(err) { - _, err := routeClient.Create(route) + for _, dr := range c.receivers { + // Don't modify the informer's copy, and give each receiver a fresh copy. + cp := original.DeepCopy() + if err := dr.SyncService(cp); err != nil { return err } - return err } - // TODO(vaikas): Perhaps only update if there are actual changes. - copy := existing.DeepCopy() - copy.Spec = route.Spec - _, err = routeClient.Update(copy) - return err + return nil } diff --git a/pkg/logging/logkey/constants.go b/pkg/logging/logkey/constants.go index 3530ac6c3edc..d95f3b097a15 100644 --- a/pkg/logging/logkey/constants.go +++ b/pkg/logging/logkey/constants.go @@ -20,6 +20,9 @@ const ( // ControllerType is the key used for controller type in structured logs ControllerType = "knative.dev/controller" + // ReceiverType is the key used for receiver type in structured logs + ReceiverType = "knative.dev/receiver" + // Namespace is the key used for namespace in structured logs Namespace = "knative.dev/namespace" diff --git a/pkg/receiver/configuration/configuration.go b/pkg/receiver/configuration/configuration.go new file mode 100644 index 000000000000..581eb4585c4f --- /dev/null +++ b/pkg/receiver/configuration/configuration.go @@ -0,0 +1,134 @@ +/* +Copyright 2018 Google LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package configuration + +import ( + "fmt" + + buildv1alpha1 "github.com/knative/build/pkg/apis/build/v1alpha1" + "github.com/knative/serving/pkg/apis/serving" + "github.com/knative/serving/pkg/apis/serving/v1alpha1" + "github.com/knative/serving/pkg/controller" + "github.com/knative/serving/pkg/logging/logkey" + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// SyncConfiguration implements configuration.Receiver +func (c *Receiver) SyncConfiguration(config *v1alpha1.Configuration) error { + logger := loggerWithConfigInfo(c.Logger, config.Namespace, config.Name) + + // Configuration business logic + if config.GetGeneration() == config.Status.ObservedGeneration { + // TODO(vaikas): Check to see if Status.LatestCreatedRevisionName is ready and update Status.LatestReady + logger.Infof("Skipping reconcile since already reconciled %d == %d", + config.Spec.Generation, config.Status.ObservedGeneration) + return nil + } + + logger.Infof("Running reconcile Configuration for %s\n%+v\n%+v\n", + config.Name, config, config.Spec.RevisionTemplate) + spec := config.Spec.RevisionTemplate.Spec + controllerRef := controller.NewConfigurationControllerRef(config) + + if config.Spec.Build != nil { + // TODO(mattmoor): Determine whether we reuse the previous build. 
+ build := &buildv1alpha1.Build{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: config.Namespace, + GenerateName: fmt.Sprintf("%s-", config.Name), + }, + Spec: *config.Spec.Build, + } + build.OwnerReferences = append(build.OwnerReferences, *controllerRef) + created, err := c.buildClientSet.BuildV1alpha1().Builds(build.Namespace).Create(build) + if err != nil { + logger.Errorf("Failed to create Build:\n%+v\n%s", build, err) + c.Recorder.Eventf(config, corev1.EventTypeWarning, "CreationFailed", "Failed to create Build %q: %v", build.Name, err) + return err + } + logger.Infof("Created Build:\n%+v", created.Name) + c.Recorder.Eventf(config, corev1.EventTypeNormal, "Created", "Created Build %q", created.Name) + spec.BuildName = created.Name + } + + revName, err := generateRevisionName(config) + if err != nil { + return err + } + + revClient := c.ElaClientSet.ServingV1alpha1().Revisions(config.Namespace) + created, err := revClient.Get(revName, metav1.GetOptions{}) + if err != nil { + if !errors.IsNotFound(err) { + logger.Error("Revisions Get failed", zap.Error(err), zap.String(logkey.Revision, revName)) + return err + } + + rev := &v1alpha1.Revision{ + ObjectMeta: config.Spec.RevisionTemplate.ObjectMeta, + Spec: spec, + } + // TODO: Should this just use rev.ObjectMeta.GenerateName = + rev.ObjectMeta.Name = revName + // Can't generate objects in a different namespace from what the call is made against, + // so use the namespace of the configuration that's being updated for the Revision being + // created. 
+ rev.ObjectMeta.Namespace = config.Namespace + + if rev.ObjectMeta.Labels == nil { + rev.ObjectMeta.Labels = make(map[string]string) + } + rev.ObjectMeta.Labels[serving.ConfigurationLabelKey] = config.Name + + if rev.ObjectMeta.Annotations == nil { + rev.ObjectMeta.Annotations = make(map[string]string) + } + rev.ObjectMeta.Annotations[serving.ConfigurationGenerationAnnotationKey] = fmt.Sprintf("%v", config.Spec.Generation) + + // Delete revisions when the parent Configuration is deleted. + rev.OwnerReferences = append(rev.OwnerReferences, *controllerRef) + + created, err = revClient.Create(rev) + if err != nil { + logger.Errorf("Failed to create Revision:\n%+v\n%s", rev, err) + c.Recorder.Eventf(config, corev1.EventTypeWarning, "CreationFailed", "Failed to create Revision %q: %v", rev.Name, err) + return err + } + c.Recorder.Eventf(config, corev1.EventTypeNormal, "Created", "Created Revision %q", rev.Name) + logger.Infof("Created Revision:\n%+v", created) + } else { + logger.Infof("Revision already created %s: %s", created.ObjectMeta.Name, err) + } + // Update the Status of the configuration with the latest generation that + // we just reconciled against so we don't keep generating revisions. + // Also update the LatestCreatedRevisionName so that we'll know revision to check + // for ready state so that when ready, we can make it Latest. 
+ config.Status.LatestCreatedRevisionName = created.ObjectMeta.Name + config.Status.ObservedGeneration = config.Spec.Generation + + logger.Infof("Updating the configuration status:\n%+v", config) + + if _, err = c.updateStatus(config); err != nil { + logger.Error("Failed to update the configuration", zap.Error(err)) + return err + } + + return nil +} diff --git a/pkg/controller/configuration/configuration_test.go b/pkg/receiver/configuration/configuration_test.go similarity index 86% rename from pkg/controller/configuration/configuration_test.go rename to pkg/receiver/configuration/configuration_test.go index e6218f684977..db006c7bf333 100644 --- a/pkg/controller/configuration/configuration_test.go +++ b/pkg/receiver/configuration/configuration_test.go @@ -17,14 +17,14 @@ limitations under the License. package configuration /* TODO tests: -- If a Congfiguration is created and deleted before the queue fires, no Revision +- If a Configuration is created and deleted before the queue fires, no Revision is created. -- When a Congfiguration is updated, a new Revision is created and - Congfiguration's LatestReadyRevisionName points to it. Also the previous Congfiguration +- When a Configuration is updated, a new Revision is created and + Configuration's LatestReadyRevisionName points to it. Also the previous Configuration still exists. -- When a Congfiguration controller is created and a Congfiguration is already +- When a Configuration controller is created and a Configuration is already out of sync, the controller creates or updates a Revision for the out of sync - Congfiguration. + Configuration. 
*/ import ( "fmt" @@ -46,7 +46,6 @@ import ( ctrl "github.com/knative/serving/pkg/controller" "k8s.io/client-go/rest" - "k8s.io/client-go/tools/cache" kubeinformers "k8s.io/client-go/informers" fakekubeclientset "k8s.io/client-go/kubernetes/fake" @@ -123,11 +122,11 @@ func getTestRevision() *v1alpha1.Revision { } } -func newTestController(t *testing.T, elaObjects ...runtime.Object) ( +func newTestReceiver(t *testing.T, elaObjects ...runtime.Object) ( kubeClient *fakekubeclientset.Clientset, buildClient *fakebuildclientset.Clientset, elaClient *fakeclientset.Clientset, - controller *Controller, + receiver *Receiver, kubeInformer kubeinformers.SharedInformerFactory, elaInformer informers.SharedInformerFactory) { @@ -144,7 +143,7 @@ func newTestController(t *testing.T, elaObjects ...runtime.Object) ( kubeInformer = kubeinformers.NewSharedInformerFactory(kubeClient, 0) elaInformer = informers.NewSharedInformerFactory(elaClient, 0) - controller = NewController( + receiver = New( kubeClient, elaClient, buildClient, @@ -153,47 +152,13 @@ func newTestController(t *testing.T, elaObjects ...runtime.Object) ( &rest.Config{}, ctrl.Config{}, zap.NewNop().Sugar(), - ).(*Controller) + ).(*Receiver) return } -func newRunningTestController(t *testing.T, elaObjects ...runtime.Object) ( - kubeClient *fakekubeclientset.Clientset, - elaClient *fakeclientset.Clientset, - controller *Controller, - kubeInformer kubeinformers.SharedInformerFactory, - elaInformer informers.SharedInformerFactory, - stopCh chan struct{}) { - - kubeClient, _, elaClient, controller, kubeInformer, elaInformer = newTestController(t, elaObjects...) - - // Start the informers. This must happen after the call to NewController, - // otherwise there are no informers to be started. - stopCh = make(chan struct{}) - kubeInformer.Start(stopCh) - elaInformer.Start(stopCh) - - // Run the controller. 
- go func() { - if err := controller.Run(2, stopCh); err != nil { - t.Fatalf("Error running controller: %v", err) - } - }() - - return -} - -func keyOrDie(obj interface{}) string { - key, err := cache.MetaNamespaceKeyFunc(obj) - if err != nil { - panic(err) - } - return key -} - func TestCreateConfigurationsCreatesRevision(t *testing.T) { - kubeClient, _, elaClient, controller, _, elaInformer := newTestController(t) + kubeClient, _, elaClient, receiver, _, elaInformer := newTestReceiver(t) config := getTestConfiguration() h := NewHooks() @@ -204,7 +169,7 @@ func TestCreateConfigurationsCreatesRevision(t *testing.T) { elaClient.ServingV1alpha1().Configurations(testNamespace).Create(config) // Since syncHandler looks in the lister, we need to add it to the informer elaInformer.Serving().V1alpha1().Configurations().Informer().GetIndexer().Add(config) - controller.syncHandler(keyOrDie(config)) + receiver.SyncConfiguration(config) list, err := elaClient.ServingV1alpha1().Revisions(testNamespace).List(metav1.ListOptions{}) @@ -251,7 +216,7 @@ func TestCreateConfigurationsCreatesRevision(t *testing.T) { } func TestCreateConfigurationCreatesBuildAndRevision(t *testing.T) { - _, buildClient, elaClient, controller, _, elaInformer := newTestController(t) + _, buildClient, elaClient, receiver, _, elaInformer := newTestReceiver(t) config := getTestConfiguration() config.Spec.Build = &buildv1alpha1.BuildSpec{ Steps: []corev1.Container{{ @@ -265,7 +230,7 @@ func TestCreateConfigurationCreatesBuildAndRevision(t *testing.T) { elaClient.ServingV1alpha1().Configurations(testNamespace).Create(config) // Since syncHandler looks in the lister, we need to add it to the informer elaInformer.Serving().V1alpha1().Configurations().Informer().GetIndexer().Add(config) - controller.syncHandler(keyOrDie(config)) + receiver.SyncConfiguration(config) revList, err := elaClient.ServingV1alpha1().Revisions(testNamespace).List(metav1.ListOptions{}) if err != nil { @@ -293,7 +258,7 @@ func 
TestCreateConfigurationCreatesBuildAndRevision(t *testing.T) { } func TestMarkConfigurationReadyWhenLatestRevisionReady(t *testing.T) { - kubeClient, _, elaClient, controller, _, elaInformer := newTestController(t) + kubeClient, _, elaClient, receiver, _, elaInformer := newTestReceiver(t) configClient := elaClient.ServingV1alpha1().Configurations(testNamespace) config := getTestConfiguration() @@ -308,7 +273,7 @@ func TestMarkConfigurationReadyWhenLatestRevisionReady(t *testing.T) { configClient.Create(config) // Since syncHandler looks in the lister, we need to add it to the informer elaInformer.Serving().V1alpha1().Configurations().Informer().GetIndexer().Add(config) - controller.syncHandler(keyOrDie(config)) + receiver.SyncConfiguration(config) reconciledConfig, err := configClient.Get(config.Name, metav1.GetOptions{}) if err != nil { @@ -341,9 +306,9 @@ func TestMarkConfigurationReadyWhenLatestRevisionReady(t *testing.T) { Status: corev1.ConditionTrue, }}, } - // Since addRevisionEvent looks in the lister, we need to add it to the informer + // Since SyncRevision looks in the lister, we need to add it to the informer elaInformer.Serving().V1alpha1().Configurations().Informer().GetIndexer().Add(reconciledConfig) - controller.addRevisionEvent(&revision) + receiver.SyncRevision(&revision) readyConfig, err := configClient.Get(config.Name, metav1.GetOptions{}) if err != nil { @@ -371,14 +336,14 @@ func TestMarkConfigurationReadyWhenLatestRevisionReady(t *testing.T) { } func TestDoNotUpdateConfigurationWhenRevisionIsNotReady(t *testing.T) { - _, _, elaClient, controller, _, elaInformer := newTestController(t) + _, _, elaClient, receiver, _, elaInformer := newTestReceiver(t) configClient := elaClient.ServingV1alpha1().Configurations(testNamespace) config := getTestConfiguration() config.Status.LatestCreatedRevisionName = revName configClient.Create(config) - // Since addRevisionEvent looks in the lister, we need to add it to the informer + // Since SyncRevision looks 
in the lister, we need to add it to the informer elaInformer.Serving().V1alpha1().Configurations().Informer().GetIndexer().Add(config) // Get the configuration after reconciling @@ -392,7 +357,7 @@ func TestDoNotUpdateConfigurationWhenRevisionIsNotReady(t *testing.T) { controllerRef := ctrl.NewConfigurationControllerRef(config) revision := getTestRevision() revision.OwnerReferences = append(revision.OwnerReferences, *controllerRef) - controller.addRevisionEvent(revision) + receiver.SyncRevision(revision) // Configuration should not have changed. actualConfig, err := configClient.Get(config.Name, metav1.GetOptions{}) @@ -406,14 +371,14 @@ func TestDoNotUpdateConfigurationWhenRevisionIsNotReady(t *testing.T) { } func TestDoNotUpdateConfigurationWhenReadyRevisionIsNotLatestCreated(t *testing.T) { - _, _, elaClient, controller, _, elaInformer := newTestController(t) + _, _, elaClient, receiver, _, elaInformer := newTestReceiver(t) configClient := elaClient.ServingV1alpha1().Configurations(testNamespace) config := getTestConfiguration() // Don't set LatestCreatedRevisionName. configClient.Create(config) - // Since addRevisionEvent looks in the lister, we need to add it to the informer + // Since SyncRevision looks in the lister, we need to add it to the informer elaInformer.Serving().V1alpha1().Configurations().Informer().GetIndexer().Add(config) // Get the configuration after reconciling @@ -434,7 +399,7 @@ func TestDoNotUpdateConfigurationWhenReadyRevisionIsNotLatestCreated(t *testing. }}, } - controller.addRevisionEvent(revision) + receiver.SyncRevision(revision) // Configuration should not have changed. actualConfig, err := configClient.Get(config.Name, metav1.GetOptions{}) @@ -448,7 +413,7 @@ func TestDoNotUpdateConfigurationWhenReadyRevisionIsNotLatestCreated(t *testing. 
} func TestDoNotUpdateConfigurationWhenLatestReadyRevisionNameIsUpToDate(t *testing.T) { - _, _, elaClient, controller, _, elaInformer := newTestController(t) + _, _, elaClient, receiver, _, elaInformer := newTestReceiver(t) configClient := elaClient.ServingV1alpha1().Configurations(testNamespace) config := getTestConfiguration() @@ -464,7 +429,7 @@ func TestDoNotUpdateConfigurationWhenLatestReadyRevisionNameIsUpToDate(t *testin LatestReadyRevisionName: revName, } configClient.Create(config) - // Since addRevisionEvent looks in the lister, we need to add it to the informer + // Since SyncRevision looks in the lister, we need to add it to the informer elaInformer.Serving().V1alpha1().Configurations().Informer().GetIndexer().Add(config) // Create a revision owned by this Configuration. This revision is Ready and @@ -479,11 +444,11 @@ func TestDoNotUpdateConfigurationWhenLatestReadyRevisionNameIsUpToDate(t *testin }}, } - controller.addRevisionEvent(revision) + receiver.SyncRevision(revision) } func TestMarkConfigurationStatusWhenLatestRevisionIsNotReady(t *testing.T) { - kubeClient, _, elaClient, controller, _, elaInformer := newTestController(t) + kubeClient, _, elaClient, receiver, _, elaInformer := newTestReceiver(t) configClient := elaClient.ServingV1alpha1().Configurations(testNamespace) config := getTestConfiguration() @@ -497,7 +462,7 @@ func TestMarkConfigurationStatusWhenLatestRevisionIsNotReady(t *testing.T) { configClient.Create(config) // Since syncHandler looks in the lister, we need to add it to the informer elaInformer.Serving().V1alpha1().Configurations().Informer().GetIndexer().Add(config) - controller.syncHandler(keyOrDie(config)) + receiver.SyncConfiguration(config) reconciledConfig, err := configClient.Get(config.Name, metav1.GetOptions{}) if err != nil { @@ -521,9 +486,9 @@ func TestMarkConfigurationStatusWhenLatestRevisionIsNotReady(t *testing.T) { Message: "Build step failed with error", }}, } - // Since addRevisionEvent looks in the lister, we 
need to add it to the informer + // Since SyncRevision looks in the lister, we need to add it to the informer elaInformer.Serving().V1alpha1().Configurations().Informer().GetIndexer().Add(reconciledConfig) - controller.addRevisionEvent(&revision) + receiver.SyncRevision(&revision) readyConfig, err := configClient.Get(config.Name, metav1.GetOptions{}) if err != nil { @@ -557,7 +522,7 @@ func TestMarkConfigurationStatusWhenLatestRevisionIsNotReady(t *testing.T) { } func TestMarkConfigurationReadyWhenLatestRevisionRecovers(t *testing.T) { - kubeClient, _, elaClient, controller, _, elaInformer := newTestController(t) + kubeClient, _, elaClient, receiver, _, elaInformer := newTestReceiver(t) configClient := elaClient.ServingV1alpha1().Configurations(testNamespace) config := getTestConfiguration() @@ -589,9 +554,9 @@ func TestMarkConfigurationReadyWhenLatestRevisionRecovers(t *testing.T) { Status: corev1.ConditionTrue, }}, } - // Since addRevisionEvent looks in the lister, we need to add it to the informer + // Since SyncRevision looks in the lister, we need to add it to the informer elaInformer.Serving().V1alpha1().Configurations().Informer().GetIndexer().Add(config) - controller.addRevisionEvent(revision) + receiver.SyncRevision(revision) readyConfig, err := configClient.Get(config.Name, metav1.GetOptions{}) if err != nil { diff --git a/pkg/receiver/configuration/helper.go b/pkg/receiver/configuration/helper.go new file mode 100644 index 000000000000..8e5ebebfd44f --- /dev/null +++ b/pkg/receiver/configuration/helper.go @@ -0,0 +1,102 @@ +/* +Copyright 2018 Google LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package configuration + +import ( + "context" + "fmt" + + "github.com/knative/serving/pkg/apis/serving/v1alpha1" + "github.com/knative/serving/pkg/logging" + "github.com/knative/serving/pkg/logging/logkey" + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// loggerWithConfigInfo enriches the logs with configuration name and namespace. +func loggerWithConfigInfo(logger *zap.SugaredLogger, ns string, name string) *zap.SugaredLogger { + return logger.With(zap.String(logkey.Namespace, ns), zap.String(logkey.Configuration, name)) +} + +func generateRevisionName(u *v1alpha1.Configuration) (string, error) { + // TODO: consider making sure the length of the + // string will not cause problems down the stack + if u.Spec.Generation == 0 { + return "", fmt.Errorf("configuration generation cannot be 0") + } + return fmt.Sprintf("%s-%05d", u.Name, u.Spec.Generation), nil +} + +func (c *Receiver) updateStatus(u *v1alpha1.Configuration) (*v1alpha1.Configuration, error) { + configClient := c.ElaClientSet.ServingV1alpha1().Configurations(u.Namespace) + newu, err := configClient.Get(u.Name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + newu.Status = u.Status + + // TODO: for CRD there's no updatestatus, so use normal update + return configClient.Update(newu) + // return configClient.UpdateStatus(newu) +} + +func getLatestRevisionStatusCondition(revision *v1alpha1.Revision) *v1alpha1.RevisionCondition { + for _, cond := range revision.Status.Conditions { + if !(cond.Type == v1alpha1.RevisionConditionReady && 
cond.Status == corev1.ConditionTrue) { + return &cond + } + } + return nil +} + +// Mark ConfigurationConditionReady of Configuration ready as the given latest +// created revision is ready. +func (c *Receiver) markConfigurationReady( + ctx context.Context, + config *v1alpha1.Configuration, revision *v1alpha1.Revision) { + logger := logging.FromContext(ctx) + logger.Info("Marking Configuration ready") + config.Status.RemoveCondition(v1alpha1.ConfigurationConditionLatestRevisionReady) + config.Status.SetCondition( + &v1alpha1.ConfigurationCondition{ + Type: v1alpha1.ConfigurationConditionReady, + Status: corev1.ConditionTrue, + Reason: "LatestRevisionReady", + }) +} + +// Mark ConfigurationConditionLatestRevisionReady of Configuration to false with the status +// from the revision +func (c *Receiver) markConfigurationLatestRevisionStatus( + ctx context.Context, + config *v1alpha1.Configuration, revision *v1alpha1.Revision) { + logger := logging.FromContext(ctx) + config.Status.RemoveCondition(v1alpha1.ConfigurationConditionReady) + cond := getLatestRevisionStatusCondition(revision) + if cond == nil { + logger.Info("Revision status is not updated yet") + return + } + config.Status.SetCondition( + &v1alpha1.ConfigurationCondition{ + Type: v1alpha1.ConfigurationConditionLatestRevisionReady, + Status: corev1.ConditionFalse, + Reason: cond.Reason, + Message: cond.Message, + }) +} diff --git a/pkg/receiver/configuration/receiver.go b/pkg/receiver/configuration/receiver.go new file mode 100644 index 000000000000..897f9d3d205d --- /dev/null +++ b/pkg/receiver/configuration/receiver.go @@ -0,0 +1,76 @@ +/* +Copyright 2018 Google LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package configuration + +import ( + buildclientset "github.com/knative/build/pkg/client/clientset/versioned" + clientset "github.com/knative/serving/pkg/client/clientset/versioned" + informers "github.com/knative/serving/pkg/client/informers/externalversions" + listers "github.com/knative/serving/pkg/client/listers/serving/v1alpha1" + "github.com/knative/serving/pkg/controller" + "github.com/knative/serving/pkg/controller/configuration" + "github.com/knative/serving/pkg/controller/revision" + "go.uber.org/zap" + kubeinformers "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/cache" +) + +const controllerAgentName = "configuration-controller" + +// Receiver implements the controller for Configuration resources +type Receiver struct { + *controller.Base + + buildClientSet buildclientset.Interface + + // lister indexes properties about Configuration + lister listers.ConfigurationLister + synced cache.InformerSynced +} + +// Receiver implements revision.Receiver +var _ revision.Receiver = (*Receiver)(nil) + +// Receiver implements configuration.Receiver +var _ configuration.Receiver = (*Receiver)(nil) + +// New returns an implementation of several receivers suitable for managing the +// lifecycle of a Configuration. 
+func New( + kubeClientSet kubernetes.Interface, + elaClientSet clientset.Interface, + buildClientSet buildclientset.Interface, + kubeInformerFactory kubeinformers.SharedInformerFactory, + elaInformerFactory informers.SharedInformerFactory, + config *rest.Config, + controllerConfig controller.Config, + logger *zap.SugaredLogger) configuration.Receiver { + + // obtain references to a shared index informer for the Configuration + // and Revision type. + informer := elaInformerFactory.Serving().V1alpha1().Configurations() + + return &Receiver{ + Base: controller.NewBase(kubeClientSet, elaClientSet, kubeInformerFactory, + elaInformerFactory, informer.Informer(), controllerAgentName, "Configurations", logger), + buildClientSet: buildClientSet, + lister: informer.Lister(), + synced: informer.Informer().HasSynced, + } +} diff --git a/pkg/receiver/configuration/revision.go b/pkg/receiver/configuration/revision.go new file mode 100644 index 000000000000..4cb55d611344 --- /dev/null +++ b/pkg/receiver/configuration/revision.go @@ -0,0 +1,92 @@ +/* +Copyright 2018 Google LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package configuration
+
+import (
+ "context"
+
+ "github.com/knative/serving/pkg/apis/serving/v1alpha1"
+ "github.com/knative/serving/pkg/controller"
+ "github.com/knative/serving/pkg/logging"
+ "github.com/knative/serving/pkg/logging/logkey"
+ "go.uber.org/zap"
+ corev1 "k8s.io/api/core/v1"
+)
+
+// SyncRevision implements revision.Receiver
+func (c *Receiver) SyncRevision(revision *v1alpha1.Revision) error {
+ revisionName := revision.Name
+ namespace := revision.Namespace
+ // Lookup and see if this Revision corresponds to a Configuration that
+ // we own and hence the Configuration that created this Revision.
+ configName := controller.LookupOwningConfigurationName(revision.OwnerReferences)
+ if configName == "" {
+ return nil
+ }
+
+ logger := loggerWithConfigInfo(c.Logger, namespace, configName).With(zap.String(logkey.Revision, revisionName))
+ ctx := logging.WithLogger(context.TODO(), logger)
+
+ config, err := c.lister.Configurations(namespace).Get(configName)
+ if err != nil {
+ logger.Error("Error fetching configuration upon revision becoming ready", zap.Error(err))
+ return err
+ }
+
+ if revision.Name != config.Status.LatestCreatedRevisionName {
+ // The revision isn't the latest created one, so ignore this event.
+ logger.Info("Revision is not the latest created one")
+ return nil
+ }
+
+ // Don't modify the informer's copy.
+ config = config.DeepCopy() + + if !revision.Status.IsReady() { + logger.Infof("Revision %q of configuration %q is not ready", revisionName, config.Name) + + //add LatestRevision condition to be false with the status from the revision + c.markConfigurationLatestRevisionStatus(ctx, config, revision) + + if _, err := c.updateStatus(config); err != nil { + logger.Error("Error updating configuration", zap.Error(err)) + } + c.Recorder.Eventf(config, corev1.EventTypeNormal, "LatestRevisionUpdate", + "Latest revision of configuration is not ready") + } else { + logger.Info("Revision is ready") + + alreadyReady := config.Status.IsReady() + if !alreadyReady { + c.markConfigurationReady(ctx, config, revision) + } + logger.Infof("Setting LatestReadyRevisionName of Configuration %q to revision %q", + config.Name, revision.Name) + config.Status.LatestReadyRevisionName = revision.Name + + if _, err := c.updateStatus(config); err != nil { + logger.Error("Error updating configuration", zap.Error(err)) + } + if !alreadyReady { + c.Recorder.Eventf(config, corev1.EventTypeNormal, "ConfigurationReady", + "Configuration becomes ready") + } + c.Recorder.Eventf(config, corev1.EventTypeNormal, "LatestReadyUpdate", + "LatestReadyRevisionName updated to %q", revision.Name) + } + return nil +} diff --git a/pkg/receiver/receiver.go b/pkg/receiver/receiver.go new file mode 100644 index 000000000000..2e7c6bead64d --- /dev/null +++ b/pkg/receiver/receiver.go @@ -0,0 +1,91 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package receiver + +import ( + clientset "github.com/knative/serving/pkg/client/clientset/versioned" + informers "github.com/knative/serving/pkg/client/informers/externalversions" + "github.com/knative/serving/pkg/logging/logkey" + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + kubeinformers "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" + typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" +) + +// Base implements most of the boilerplate and common code +// we have in our receivers. +type Base struct { + // KubeClientSet allows us to talk to the k8s for core APIs + KubeClientSet kubernetes.Interface + + // ElaClientSet allows us to configure Ela objects + ElaClientSet clientset.Interface + + // KubeInformerFactory provides shared informers for resources + // in all known API group versions + KubeInformerFactory kubeinformers.SharedInformerFactory + + // ElaInformerFactory provides shared informers for resources + // in all known API group versions + ElaInformerFactory informers.SharedInformerFactory + + // Recorder is an event recorder for recording Event resources to the + // Kubernetes API. + Recorder record.EventRecorder + + // Sugared logger is easier to use but is not as performant as the + // raw logger. In performance critical paths, call logger.Desugar() + // and use the returned raw logger instead. In addition to the + // performance benefits, raw logger also preserves type-safety at + // the expense of slightly greater verbosity. + Logger *zap.SugaredLogger +} + +// NewBase instantiates a new instance of Base implementing +// the common & boilerplate code between our receivers. 
+func NewBase( + kubeClientSet kubernetes.Interface, + elaClientSet clientset.Interface, + kubeInformerFactory kubeinformers.SharedInformerFactory, + elaInformerFactory informers.SharedInformerFactory, + informer cache.SharedIndexInformer, + receiverAgentName string, + logger *zap.SugaredLogger) *Base { + + // Enrich the logs with receiver name + logger = logger.Named(receiverAgentName).With(zap.String(logkey.ReceiverType, receiverAgentName)) + + // Create event broadcaster + logger.Debug("Creating event broadcaster") + eventBroadcaster := record.NewBroadcaster() + eventBroadcaster.StartLogging(logger.Named("event-broadcaster").Infof) + eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: kubeClientSet.CoreV1().Events("")}) + recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: receiverAgentName}) + + return &Base{ + KubeClientSet: kubeClientSet, + ElaClientSet: elaClientSet, + KubeInformerFactory: kubeInformerFactory, + ElaInformerFactory: elaInformerFactory, + Recorder: recorder, + Logger: logger, + } +} diff --git a/pkg/controller/revision/ela_autoscaler.go b/pkg/receiver/revision/autoscaler.go similarity index 100% rename from pkg/controller/revision/ela_autoscaler.go rename to pkg/receiver/revision/autoscaler.go diff --git a/pkg/receiver/revision/build.go b/pkg/receiver/revision/build.go new file mode 100644 index 000000000000..b61a5a410ab4 --- /dev/null +++ b/pkg/receiver/revision/build.go @@ -0,0 +1,45 @@ +/* +Copyright 2018 Google LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package revision + +import ( + buildv1alpha1 "github.com/knative/build/pkg/apis/build/v1alpha1" +) + +// SyncBuild implements build.Receiver +func (c *Receiver) SyncBuild(build *buildv1alpha1.Build) error { + cond := getBuildDoneCondition(build) + if cond == nil { + // The build isn't done, so ignore this event. + return nil + } + + // For each of the revisions watching this build, mark their build phase as complete. + for k := range c.buildtracker.GetTrackers(build) { + // Look up the revision to mark complete. + namespace, name := splitKey(k) + rev, err := c.lister.Revisions(namespace).Get(name) + if err != nil { + return err + } + if err := c.markBuildComplete(rev, cond); err != nil { + return err + } + } + + return nil +} diff --git a/pkg/controller/revision/buildtracker.go b/pkg/receiver/revision/buildtracker.go similarity index 100% rename from pkg/controller/revision/buildtracker.go rename to pkg/receiver/revision/buildtracker.go diff --git a/pkg/receiver/revision/constant.go b/pkg/receiver/revision/constant.go new file mode 100644 index 000000000000..29ca05601728 --- /dev/null +++ b/pkg/receiver/revision/constant.go @@ -0,0 +1,50 @@ +/* +Copyright 2018 Google LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package revision + +import ( + "time" + + "k8s.io/apimachinery/pkg/util/intstr" +) + +const ( + userContainerName string = "user-container" + userPortName string = "user-port" + userPort = 8080 + + fluentdContainerName string = "fluentd-proxy" + queueContainerName string = "queue-proxy" + // queueSidecarName set by -queueSidecarName flag + queueHTTPPortName string = "queue-http-port" + + requestQueueContainerName string = "request-queue" + + controllerAgentName = "revision-controller" + autoscalerPort = 8080 + + serviceTimeoutDuration = 5 * time.Minute + sidecarIstioInjectAnnotation = "sidecar.istio.io/inject" + // TODO (arvtiwar): this should be a config option. + progressDeadlineSeconds int32 = 120 +) + +var ( + elaPodReplicaCount = int32(1) + elaPodMaxUnavailable = intstr.IntOrString{Type: intstr.Int, IntVal: 1} + elaPodMaxSurge = intstr.IntOrString{Type: intstr.Int, IntVal: 1} +) diff --git a/pkg/receiver/revision/deployment.go b/pkg/receiver/revision/deployment.go new file mode 100644 index 000000000000..ef473c028fe9 --- /dev/null +++ b/pkg/receiver/revision/deployment.go @@ -0,0 +1,59 @@ +/* +Copyright 2018 Google LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package revision + +import ( + "fmt" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + + "github.com/knative/serving/pkg/apis/serving/v1alpha1" +) + +// SyncDeployment implements deployment.Receiver +func (c *Receiver) SyncDeployment(deployment *appsv1.Deployment) error { + cond := getDeploymentProgressCondition(deployment) + if cond == nil { + return nil + } + + //Get the handle of Revision in context + revName := deployment.Name + namespace := deployment.Namespace + logger := loggerWithRevisionInfo(c.Logger, namespace, revName) + + rev, err := c.lister.Revisions(namespace).Get(revName) + if err != nil { + return err + } + //Set the revision condition reason to ProgressDeadlineExceeded + rev.Status.SetCondition( + &v1alpha1.RevisionCondition{ + Type: v1alpha1.RevisionConditionReady, + Status: corev1.ConditionFalse, + Reason: "ProgressDeadlineExceeded", + Message: fmt.Sprintf("Unable to create pods for more than %d seconds.", progressDeadlineSeconds), + }) + + logger.Infof("Updating status with the following conditions %+v", rev.Status.Conditions) + if _, err := c.updateStatus(rev); err != nil { + return err + } + c.Recorder.Eventf(rev, corev1.EventTypeNormal, "ProgressDeadlineExceeded", "Revision %s not ready due to Deployment timeout", revName) + return nil +} diff --git a/pkg/receiver/revision/endpoint.go b/pkg/receiver/revision/endpoint.go new file mode 100644 index 000000000000..3f77a477df75 --- /dev/null +++ b/pkg/receiver/revision/endpoint.go @@ -0,0 +1,88 @@ +/* +Copyright 2018 Google LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package revision + +import ( + "context" + "log" + "time" + + "github.com/knative/serving/pkg/logging" + "go.uber.org/zap" + + corev1 "k8s.io/api/core/v1" + + // TODO(mattmoor): Move controller deps into receiver + "github.com/knative/serving/pkg/controller" +) + +// SyncEndpoint implements endpoint.Receiver +func (c *Receiver) SyncEndpoint(endpoint *corev1.Endpoints) error { + eName := endpoint.Name + namespace := endpoint.Namespace + + // Lookup and see if this endpoints corresponds to a service that + // we own and hence the Revision that created this service. + revName := lookupServiceOwner(endpoint) + if revName == "" { + return nil + } + logger := loggerWithRevisionInfo(c.Logger, namespace, revName) + ctx := logging.WithLogger(context.TODO(), logger) + + rev, err := c.lister.Revisions(namespace).Get(revName) + if err != nil { + logger.Error("Error fetching revision", zap.Error(err)) + return err + } + + // Check to see if endpoint is the service endpoint + if eName != controller.GetElaK8SServiceNameForRevision(rev) { + return nil + } + + // Check to see if the revision has already been marked as ready or failed + // and if it is, then there's no need to do anything to it. + if rev.Status.IsReady() || rev.Status.IsFailed() { + return nil + } + // Don't modify the informer's copy. + rev = rev.DeepCopy() + + if getIsServiceReady(endpoint) { + logger.Infof("Endpoint %q is ready", eName) + if err := c.markRevisionReady(ctx, rev); err != nil { + logger.Error("Error marking revision ready", zap.Error(err)) + return err + } + c.Recorder.Eventf(rev, corev1.EventTypeNormal, "RevisionReady", "Revision becomes ready upon endpoint %q becoming ready", endpoint.Name) + return nil + } + + revisionAge := time.Now().Sub(getRevisionLastTransitionTime(rev)) + if revisionAge < serviceTimeoutDuration { + log.Printf("rev %v vs. 
duration %v", revisionAge, serviceTimeoutDuration) + return nil + } + + if err := c.markRevisionFailed(ctx, rev); err != nil { + logger.Error("Error marking revision failed", zap.Error(err)) + return err + } + c.Recorder.Eventf(rev, corev1.EventTypeWarning, "RevisionFailed", "Revision did not become ready due to endpoint %q", endpoint.Name) + return nil +} diff --git a/pkg/controller/revision/ela_fluentd.go b/pkg/receiver/revision/fluentd.go similarity index 100% rename from pkg/controller/revision/ela_fluentd.go rename to pkg/receiver/revision/fluentd.go diff --git a/pkg/receiver/revision/helper.go b/pkg/receiver/revision/helper.go new file mode 100644 index 000000000000..be50075cb495 --- /dev/null +++ b/pkg/receiver/revision/helper.go @@ -0,0 +1,283 @@ +/* +Copyright 2018 Google LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package revision + +import ( + "context" + "strings" + "time" + + "github.com/knative/serving/pkg/apis/serving" + "github.com/knative/serving/pkg/logging" + "github.com/knative/serving/pkg/logging/logkey" + "go.uber.org/zap" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + buildv1alpha1 "github.com/knative/build/pkg/apis/build/v1alpha1" + "github.com/knative/serving/pkg/apis/serving/v1alpha1" + // TODO(mattmoor): Move controller deps into receiver + "github.com/knative/serving/pkg/controller" +) + +// loggerWithRevisionInfo enriches the logs with revision name and namespace. +func loggerWithRevisionInfo(logger *zap.SugaredLogger, ns string, name string) *zap.SugaredLogger { + return logger.With(zap.String(logkey.Namespace, ns), zap.String(logkey.Revision, name)) +} + +func (c *Receiver) updateRevisionLoggingURL(rev *v1alpha1.Revision) error { + logURLTmpl := c.controllerConfig.LoggingURLTemplate + if logURLTmpl == "" { + return nil + } + + url := strings.Replace(logURLTmpl, "${REVISION_UID}", string(rev.UID), -1) + + if rev.Status.LogURL == url { + return nil + } + rev.Status.LogURL = url + _, err := c.updateStatus(rev) + return err +} + +// Checks whether the Revision knows whether the build is done. +// TODO(mattmoor): Use a method on the Build type. 
+func isBuildDone(rev *v1alpha1.Revision) (done, failed bool) { + if rev.Spec.BuildName == "" { + return true, false + } + for _, cond := range rev.Status.Conditions { + if cond.Type != v1alpha1.RevisionConditionBuildSucceeded { + continue + } + switch cond.Status { + case corev1.ConditionTrue: + return true, false + case corev1.ConditionFalse: + return true, true + case corev1.ConditionUnknown: + return false, false + } + } + return false, false +} + +func (c *Receiver) markRevisionReady(ctx context.Context, rev *v1alpha1.Revision) error { + logger := logging.FromContext(ctx) + logger.Info("Marking Revision ready") + rev.Status.SetCondition( + &v1alpha1.RevisionCondition{ + Type: v1alpha1.RevisionConditionReady, + Status: corev1.ConditionTrue, + Reason: "ServiceReady", + }) + _, err := c.updateStatus(rev) + return err +} + +func (c *Receiver) markRevisionFailed(ctx context.Context, rev *v1alpha1.Revision) error { + logger := logging.FromContext(ctx) + logger.Info("Marking Revision failed") + reason, message := "ServiceTimeout", "Timed out waiting for a service endpoint to become ready" + rev.Status.SetCondition( + &v1alpha1.RevisionCondition{ + Type: v1alpha1.RevisionConditionResourcesAvailable, + Status: corev1.ConditionFalse, + Reason: reason, + Message: message, + }) + rev.Status.SetCondition( + &v1alpha1.RevisionCondition{ + Type: v1alpha1.RevisionConditionReady, + Status: corev1.ConditionFalse, + Reason: reason, + Message: message, + }) + _, err := c.updateStatus(rev) + return err +} + +func (c *Receiver) markRevisionBuilding(ctx context.Context, rev *v1alpha1.Revision) error { + logger := logging.FromContext(ctx) + reason := "Building" + logger.Infof("Marking Revision %s", reason) + rev.Status.SetCondition( + &v1alpha1.RevisionCondition{ + Type: v1alpha1.RevisionConditionBuildSucceeded, + Status: corev1.ConditionUnknown, + Reason: reason, + }) + rev.Status.SetCondition( + &v1alpha1.RevisionCondition{ + Type: v1alpha1.RevisionConditionReady, + Status: 
corev1.ConditionUnknown, + Reason: reason, + }) + // Let this trigger a reconciliation loop. + _, err := c.updateStatus(rev) + return err +} + +func (c *Receiver) markBuildComplete(rev *v1alpha1.Revision, bc *buildv1alpha1.BuildCondition) error { + switch bc.Type { + case buildv1alpha1.BuildComplete: + rev.Status.SetCondition( + &v1alpha1.RevisionCondition{ + Type: v1alpha1.RevisionConditionBuildSucceeded, + Status: corev1.ConditionTrue, + }) + c.Recorder.Event(rev, corev1.EventTypeNormal, "BuildComplete", bc.Message) + case buildv1alpha1.BuildFailed, buildv1alpha1.BuildInvalid: + rev.Status.SetCondition( + &v1alpha1.RevisionCondition{ + Type: v1alpha1.RevisionConditionBuildSucceeded, + Status: corev1.ConditionFalse, + Reason: bc.Reason, + Message: bc.Message, + }) + rev.Status.SetCondition( + &v1alpha1.RevisionCondition{ + Type: v1alpha1.RevisionConditionReady, + Status: corev1.ConditionFalse, + Reason: bc.Reason, + Message: bc.Message, + }) + c.Recorder.Event(rev, corev1.EventTypeWarning, "BuildFailed", bc.Message) + } + // This will trigger a reconciliation that will cause us to stop tracking the build. 
+ _, err := c.updateStatus(rev) + return err +} + +func getBuildDoneCondition(build *buildv1alpha1.Build) *buildv1alpha1.BuildCondition { + for _, cond := range build.Status.Conditions { + if cond.Status != corev1.ConditionTrue { + continue + } + switch cond.Type { + case buildv1alpha1.BuildComplete, buildv1alpha1.BuildFailed, buildv1alpha1.BuildInvalid: + return &cond + } + } + return nil +} + +func getIsServiceReady(e *corev1.Endpoints) bool { + for _, es := range e.Subsets { + if len(es.Addresses) > 0 { + return true + } + } + return false +} + +func getRevisionLastTransitionTime(r *v1alpha1.Revision) time.Time { + condCount := len(r.Status.Conditions) + if condCount == 0 { + return r.CreationTimestamp.Time + } + return r.Status.Conditions[condCount-1].LastTransitionTime.Time +} + +func getDeploymentProgressCondition(deployment *appsv1.Deployment) *appsv1.DeploymentCondition { + //as per https://kubernetes.io/docs/concepts/workloads/controllers/deployment + for _, cond := range deployment.Status.Conditions { + // Look for Deployment with status False + if cond.Status != corev1.ConditionFalse { + continue + } + // with Type Progressing and Reason Timeout + // TODO (arvtiwar): hard coding "ProgressDeadlineExceeded" to avoid import kubernetes/kubernetes + if cond.Type == appsv1.DeploymentProgressing && cond.Reason == "ProgressDeadlineExceeded" { + return &cond + } + } + return nil +} + +func newRevisionNonControllerRef(rev *v1alpha1.Revision) *metav1.OwnerReference { + blockOwnerDeletion := false + isController := false + revRef := controller.NewRevisionControllerRef(rev) + revRef.BlockOwnerDeletion = &blockOwnerDeletion + revRef.Controller = &isController + return revRef +} + +func addOwnerReference(configMap *corev1.ConfigMap, ownerReference *metav1.OwnerReference) { + isOwner := false + for _, existingOwner := range configMap.OwnerReferences { + if ownerReference.Name == existingOwner.Name { + isOwner = true + break + } + } + if !isOwner { + 
configMap.OwnerReferences = append(configMap.OwnerReferences, *ownerReference) + } +} + +func (c *Receiver) removeFinalizers(ctx context.Context, rev *v1alpha1.Revision, ns string) error { + logger := logging.FromContext(ctx) + logger.Infof("Removing finalizers for %q\n", rev.Name) + accessor, err := meta.Accessor(rev) + if err != nil { + logger.Panic("Failed to get metadata", zap.Error(err)) + } + finalizers := accessor.GetFinalizers() + for i, v := range finalizers { + if v == "controller" { + finalizers = append(finalizers[:i], finalizers[i+1:]...) + } + } + accessor.SetFinalizers(finalizers) + prClient := c.ElaClientSet.ServingV1alpha1().Revisions(rev.Namespace) + prClient.Update(rev) + logger.Infof("The finalizer 'controller' is removed.") + + return nil +} + +func (c *Receiver) updateStatus(rev *v1alpha1.Revision) (*v1alpha1.Revision, error) { + prClient := c.ElaClientSet.ServingV1alpha1().Revisions(rev.Namespace) + newRev, err := prClient.Get(rev.Name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + newRev.Status = rev.Status + + // TODO: for CRD there's no updatestatus, so use normal update + return prClient.Update(newRev) + // return prClient.UpdateStatus(newRev) +} + +// Given an endpoint see if it's managed by us and return the +// revision that created it. +// TODO: Consider using OwnerReferences. +// https://github.com/kubernetes/sample-controller/blob/master/controller.go#L373-L384 +func lookupServiceOwner(endpoint *corev1.Endpoints) string { + // see if there's a label on this object marking it as ours. 
+ if revisionName, ok := endpoint.Labels[serving.RevisionLabelKey]; ok { + return revisionName + } + return "" +} diff --git a/pkg/controller/revision/ela_pod.go b/pkg/receiver/revision/pod.go similarity index 100% rename from pkg/controller/revision/ela_pod.go rename to pkg/receiver/revision/pod.go diff --git a/pkg/controller/revision/ela_queue.go b/pkg/receiver/revision/queue.go similarity index 100% rename from pkg/controller/revision/ela_queue.go rename to pkg/receiver/revision/queue.go diff --git a/pkg/receiver/revision/receiver.go b/pkg/receiver/revision/receiver.go new file mode 100644 index 000000000000..72a60aafd6b6 --- /dev/null +++ b/pkg/receiver/revision/receiver.go @@ -0,0 +1,142 @@ +/* +Copyright 2018 Google LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package revision
+
+import (
+ "net/http"
+
+ "github.com/josephburnett/k8sflag/pkg/k8sflag"
+ "go.uber.org/zap"
+
+ clientset "github.com/knative/serving/pkg/client/clientset/versioned"
+ appsv1 "k8s.io/api/apps/v1"
+ kubeinformers "k8s.io/client-go/informers"
+
+ informers "github.com/knative/serving/pkg/client/informers/externalversions"
+ "k8s.io/client-go/kubernetes"
+ "k8s.io/client-go/rest"
+ "k8s.io/client-go/tools/cache"
+
+ listers "github.com/knative/serving/pkg/client/listers/serving/v1alpha1"
+ "github.com/knative/serving/pkg/controller/build"
+ "github.com/knative/serving/pkg/controller/deployment"
+ "github.com/knative/serving/pkg/controller/endpoint"
+ "github.com/knative/serving/pkg/controller/revision"
+ "github.com/knative/serving/pkg/receiver"
+)
+
+type resolver interface {
+ Resolve(*appsv1.Deployment) error
+}
+
+// Receiver implements the receiver for Revision resources.
+type Receiver struct {
+ *receiver.Base
+
+ // lister indexes properties about Revision
+ lister listers.RevisionLister
+ synced cache.InformerSynced
+
+ buildtracker *buildTracker
+
+ resolver resolver
+
+ // enableVarLogCollection determines whether to set up a fluentd sidecar to
+ // collect logs under /var/log/.
+ enableVarLogCollection bool
+
+ // controllerConfig includes the configurations for the controller
+ controllerConfig *ControllerConfig
+}
+
+// Receiver implements revision.Receiver
+var _ revision.Receiver = (*Receiver)(nil)
+
+// Receiver implements build.Receiver
+var _ build.Receiver = (*Receiver)(nil)
+
+// Receiver implements endpoint.Receiver
+var _ endpoint.Receiver = (*Receiver)(nil)
+
+// Receiver implements deployment.Receiver
+var _ deployment.Receiver = (*Receiver)(nil)
+
+// ControllerConfig includes the configurations for the controller.
+type ControllerConfig struct { + // Autoscale part + + // see (config-autoscaler.yaml) + AutoscaleConcurrencyQuantumOfTime *k8sflag.DurationFlag + AutoscaleEnableSingleConcurrency *k8sflag.BoolFlag + + // AutoscalerImage is the name of the image used for the autoscaler pod. + AutoscalerImage string + + // QueueSidecarImage is the name of the image used for the queue sidecar + // injected into the revision pod + QueueSidecarImage string + + // logging part + + // EnableVarLogCollection dedicates whether to set up a fluentd sidecar to + // collect logs under /var/log/. + EnableVarLogCollection bool + + // TODO(#818): Use the fluentd deamon set to collect /var/log. + // FluentdSidecarImage is the name of the image used for the fluentd sidecar + // injected into the revision pod. It is used only when enableVarLogCollection + // is true. + FluentdSidecarImage string + // FluentdSidecarOutputConfig is the config for fluentd sidecar to specify + // logging output destination. + FluentdSidecarOutputConfig string + + // LoggingURLTemplate is a string containing the logging url template where + // the variable REVISION_UID will be replaced with the created revision's UID. + LoggingURLTemplate string + + // QueueProxyLoggingConfig is a string containing the logger configuration for queue proxy. + QueueProxyLoggingConfig string + + // QueueProxyLoggingLevel is a string containing the logger level for queue proxy. + QueueProxyLoggingLevel string +} + +// New returns an implementation of several receivers suitable for managing the +// lifecycle of a Revision. +func New( + kubeClientSet kubernetes.Interface, + elaClientSet clientset.Interface, + kubeInformerFactory kubeinformers.SharedInformerFactory, + elaInformerFactory informers.SharedInformerFactory, + config *rest.Config, + controllerConfig *ControllerConfig, + logger *zap.SugaredLogger) revision.Receiver { + + // obtain references to a shared index informer for the Revision type. 
+ informer := elaInformerFactory.Serving().V1alpha1().Revisions() + + return &Receiver{ + Base: receiver.NewBase(kubeClientSet, elaClientSet, kubeInformerFactory, + elaInformerFactory, informer.Informer(), controllerAgentName, logger), + lister: informer.Lister(), + synced: informer.Informer().HasSynced, + buildtracker: &buildTracker{builds: map[key]set{}}, + resolver: &digestResolver{client: kubeClientSet, transport: http.DefaultTransport}, + controllerConfig: controllerConfig, + } +} diff --git a/pkg/controller/revision/ela_resolve.go b/pkg/receiver/revision/resolve.go similarity index 100% rename from pkg/controller/revision/ela_resolve.go rename to pkg/receiver/revision/resolve.go diff --git a/pkg/controller/revision/ela_resolve_test.go b/pkg/receiver/revision/resolve_test.go similarity index 100% rename from pkg/controller/revision/ela_resolve_test.go rename to pkg/receiver/revision/resolve_test.go diff --git a/pkg/controller/revision/ela_resource.go b/pkg/receiver/revision/resource.go similarity index 100% rename from pkg/controller/revision/ela_resource.go rename to pkg/receiver/revision/resource.go diff --git a/pkg/receiver/revision/revision.go b/pkg/receiver/revision/revision.go new file mode 100644 index 000000000000..a1935c085bcb --- /dev/null +++ b/pkg/receiver/revision/revision.go @@ -0,0 +1,461 @@ +/* +Copyright 2018 Google LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package revision + +import ( + "context" + "reflect" + + "github.com/knative/serving/pkg/logging" + "go.uber.org/zap" + + corev1 "k8s.io/api/core/v1" + + apierrs "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/knative/serving/pkg/apis/serving/v1alpha1" + // TODO(mattmoor): Move controller deps into receiver + "github.com/knative/serving/pkg/controller" +) + +// SyncRevision implements revision.Receiver +func (c *Receiver) SyncRevision(rev *v1alpha1.Revision) error { + logger := loggerWithRevisionInfo(c.Logger, rev.Namespace, rev.Name) + ctx := logging.WithLogger(context.TODO(), logger) + + if err := c.updateRevisionLoggingURL(rev); err != nil { + logger.Error("Error updating the revisions logging url", zap.Error(err)) + return err + } + + if rev.Spec.BuildName != "" { + if done, failed := isBuildDone(rev); !done { + if alreadyTracked := c.buildtracker.Track(rev); !alreadyTracked { + if err := c.markRevisionBuilding(ctx, rev); err != nil { + logger.Error("Error recording the BuildSucceeded=Unknown condition", zap.Error(err)) + return err + } + } + return nil + } else { + // The Build's complete, so stop tracking it. + c.buildtracker.Untrack(rev) + if failed { + return nil + } + // If the build didn't fail, proceed to creating K8s resources. + } + } + + if _, err := controller.GetOrCreateRevisionNamespace(ctx, rev.Namespace, c.KubeClientSet); err != nil { + logger.Panic("Failed to create namespace", zap.Error(err)) + } + logger.Info("Namespace validated to exist, moving on") + + return c.reconcileWithImage(ctx, rev, rev.Namespace) +} + +// reconcileWithImage handles enqueued messages that have an image. 
+func (c *Receiver) reconcileWithImage(ctx context.Context, rev *v1alpha1.Revision, ns string) error { + err := c.reconcileOnceBuilt(ctx, rev, ns) + if err != nil { + logging.FromContext(ctx).Error("Reconcile once build failed", zap.Error(err)) + } + return err +} + +// reconcileOnceBuilt handles enqueued messages that have an image. +func (c *Receiver) reconcileOnceBuilt(ctx context.Context, rev *v1alpha1.Revision, ns string) error { + logger := logging.FromContext(ctx) + accessor, err := meta.Accessor(rev) + if err != nil { + logger.Panic("Failed to get metadata", zap.Error(err)) + } + + deletionTimestamp := accessor.GetDeletionTimestamp() + logger.Infof("Check the deletionTimestamp: %s\n", deletionTimestamp) + + elaNS := controller.GetElaNamespaceName(rev.Namespace) + + if deletionTimestamp == nil && rev.Spec.ServingState == v1alpha1.RevisionServingStateActive { + logger.Info("Creating or reconciling resources for revision") + return c.createK8SResources(ctx, rev, elaNS) + } + return c.deleteK8SResources(ctx, rev, elaNS) +} + +func (c *Receiver) deleteK8SResources(ctx context.Context, rev *v1alpha1.Revision, ns string) error { + logger := logging.FromContext(ctx) + logger.Info("Deleting the resources for revision") + err := c.deleteDeployment(ctx, rev, ns) + if err != nil { + logger.Error("Failed to delete a deployment", zap.Error(err)) + } + logger.Info("Deleted deployment") + + err = c.deleteAutoscalerDeployment(ctx, rev) + if err != nil { + logger.Error("Failed to delete autoscaler Deployment", zap.Error(err)) + } + logger.Info("Deleted autoscaler Deployment") + + err = c.deleteAutoscalerService(ctx, rev) + if err != nil { + logger.Error("Failed to delete autoscaler Service", zap.Error(err)) + } + logger.Info("Deleted autoscaler Service") + + err = c.deleteService(ctx, rev, ns) + if err != nil { + logger.Error("Failed to delete k8s service", zap.Error(err)) + } + logger.Info("Deleted service") + + // And the deployment is no longer ready, so update that + 
rev.Status.SetCondition( + &v1alpha1.RevisionCondition{ + Type: v1alpha1.RevisionConditionReady, + Status: corev1.ConditionFalse, + Reason: "Inactive", + }) + logger.Infof("Updating status with the following conditions %+v", rev.Status.Conditions) + if _, err := c.updateStatus(rev); err != nil { + logger.Error("Error recording inactivation of revision", zap.Error(err)) + return err + } + + return nil +} + +func (c *Receiver) createK8SResources(ctx context.Context, rev *v1alpha1.Revision, ns string) error { + logger := logging.FromContext(ctx) + // Fire off a Deployment.. + if err := c.reconcileDeployment(ctx, rev, ns); err != nil { + logger.Error("Failed to create a deployment", zap.Error(err)) + return err + } + + // Autoscale the service + if err := c.reconcileAutoscalerDeployment(ctx, rev); err != nil { + logger.Error("Failed to create autoscaler Deployment", zap.Error(err)) + } + if err := c.reconcileAutoscalerService(ctx, rev); err != nil { + logger.Error("Failed to create autoscaler Service", zap.Error(err)) + } + if c.controllerConfig.EnableVarLogCollection { + if err := c.reconcileFluentdConfigMap(ctx, rev); err != nil { + logger.Error("Failed to create fluent config map", zap.Error(err)) + } + } + + // Create k8s service + serviceName, err := c.reconcileService(ctx, rev, ns) + if err != nil { + logger.Error("Failed to create k8s service", zap.Error(err)) + } else { + rev.Status.ServiceName = serviceName + } + + // Check to see if the revision has already been marked as ready and + // don't mark it if it's already ready. + // TODO: could always fetch the endpoint again and double-check it is still + // ready. + if rev.Status.IsReady() { + return nil + } + + // Checking existing revision condition to see if it is the initial deployment or + // during the reactivating process. 
If a revision is in condition "Inactive" or "Activating", + // we need to route traffic to the activator; if a revision is in condition "Deploying", + // we need to route traffic to the revision directly. + reason := "Deploying" + cond := rev.Status.GetCondition(v1alpha1.RevisionConditionReady) + if cond != nil { + if (cond.Reason == "Inactive" && cond.Status == corev1.ConditionFalse) || + (cond.Reason == "Activating" && cond.Status == corev1.ConditionUnknown) { + reason = "Activating" + } + } + + // By updating our deployment status we will trigger a Reconcile() + // that will watch for service to become ready for serving traffic. + rev.Status.SetCondition( + &v1alpha1.RevisionCondition{ + Type: v1alpha1.RevisionConditionReady, + Status: corev1.ConditionUnknown, + Reason: reason, + }) + logger.Infof("Updating status with the following conditions %+v", rev.Status.Conditions) + if _, err := c.updateStatus(rev); err != nil { + logger.Error("Error recording build completion", zap.Error(err)) + return err + } + + return nil +} + +func (c *Receiver) deleteDeployment(ctx context.Context, rev *v1alpha1.Revision, ns string) error { + logger := logging.FromContext(ctx) + deploymentName := controller.GetRevisionDeploymentName(rev) + dc := c.KubeClientSet.AppsV1().Deployments(ns) + if _, err := dc.Get(deploymentName, metav1.GetOptions{}); err != nil && apierrs.IsNotFound(err) { + return nil + } + + logger.Infof("Deleting Deployment %q", deploymentName) + tmp := metav1.DeletePropagationForeground + err := dc.Delete(deploymentName, &metav1.DeleteOptions{ + PropagationPolicy: &tmp, + }) + if err != nil && !apierrs.IsNotFound(err) { + logger.Errorf("deployments.Delete for %q failed: %s", deploymentName, err) + return err + } + return nil +} + +func (c *Receiver) reconcileDeployment(ctx context.Context, rev *v1alpha1.Revision, ns string) error { + logger := logging.FromContext(ctx) + dc := c.KubeClientSet.AppsV1().Deployments(ns) + // First, check if deployment exists already. 
+ deploymentName := controller.GetRevisionDeploymentName(rev) + + if _, err := dc.Get(deploymentName, metav1.GetOptions{}); err != nil { + if !apierrs.IsNotFound(err) { + logger.Errorf("deployments.Get for %q failed: %s", deploymentName, err) + return err + } + logger.Infof("Deployment %q doesn't exist, creating", deploymentName) + } else { + // TODO(mattmoor): Compare the deployments and update if it has changed + // out from under us. + logger.Infof("Found existing deployment %q", deploymentName) + return nil + } + + // Create the deployment. + controllerRef := controller.NewRevisionControllerRef(rev) + // Create a single pod so that it gets created before deployment->RS to try to speed + // things up + podSpec := MakeElaPodSpec(rev, c.controllerConfig) + deployment := MakeElaDeployment(rev, ns) + deployment.OwnerReferences = append(deployment.OwnerReferences, *controllerRef) + + deployment.Spec.Template.Spec = *podSpec + + // Resolve tag image references to digests. + if err := c.resolver.Resolve(deployment); err != nil { + logger.Error("Error resolving deployment", zap.Error(err)) + rev.Status.SetCondition( + &v1alpha1.RevisionCondition{ + Type: v1alpha1.RevisionConditionContainerHealthy, + Status: corev1.ConditionFalse, + Reason: "ContainerMissing", + Message: err.Error(), + }) + rev.Status.SetCondition( + &v1alpha1.RevisionCondition{ + Type: v1alpha1.RevisionConditionReady, + Status: corev1.ConditionFalse, + Reason: "ContainerMissing", + Message: err.Error(), + }) + if _, err := c.updateStatus(rev); err != nil { + logger.Error("Error recording resolution problem", zap.Error(err)) + return err + } + return err + } + + // Set the ProgressDeadlineSeconds + deployment.Spec.ProgressDeadlineSeconds = new(int32) + *deployment.Spec.ProgressDeadlineSeconds = progressDeadlineSeconds + + logger.Infof("Creating Deployment: %q", deployment.Name) + _, createErr := dc.Create(deployment) + + return createErr +} + +func (c *Receiver) deleteService(ctx context.Context, rev 
*v1alpha1.Revision, ns string) error { + logger := logging.FromContext(ctx) + sc := c.KubeClientSet.Core().Services(ns) + serviceName := controller.GetElaK8SServiceNameForRevision(rev) + + logger.Infof("Deleting service %q", serviceName) + tmp := metav1.DeletePropagationForeground + err := sc.Delete(serviceName, &metav1.DeleteOptions{ + PropagationPolicy: &tmp, + }) + if err != nil && !apierrs.IsNotFound(err) { + logger.Errorf("service.Delete for %q failed: %s", serviceName, err) + return err + } + return nil +} + +func (c *Receiver) reconcileService(ctx context.Context, rev *v1alpha1.Revision, ns string) (string, error) { + logger := logging.FromContext(ctx) + sc := c.KubeClientSet.Core().Services(ns) + serviceName := controller.GetElaK8SServiceNameForRevision(rev) + + if _, err := sc.Get(serviceName, metav1.GetOptions{}); err != nil { + if !apierrs.IsNotFound(err) { + logger.Errorf("services.Get for %q failed: %s", serviceName, err) + return "", err + } + logger.Infof("serviceName %q doesn't exist, creating", serviceName) + } else { + // TODO(vaikas): Check that the service is legit and matches what we expect + // to have there. + logger.Infof("Found existing service %q", serviceName) + return serviceName, nil + } + + controllerRef := controller.NewRevisionControllerRef(rev) + service := MakeRevisionK8sService(rev, ns) + service.OwnerReferences = append(service.OwnerReferences, *controllerRef) + logger.Infof("Creating service: %q", service.Name) + _, err := sc.Create(service) + return serviceName, err +} + +func (c *Receiver) reconcileFluentdConfigMap(ctx context.Context, rev *v1alpha1.Revision) error { + logger := logging.FromContext(ctx) + ns := rev.Namespace + + // One ConfigMap for Fluentd sidecar per namespace. It has multiple owner + // references. Can not set blockOwnerDeletion and Controller to true. 
+ revRef := newRevisionNonControllerRef(rev) + + cmc := c.KubeClientSet.Core().ConfigMaps(ns) + configMap, err := cmc.Get(fluentdConfigMapName, metav1.GetOptions{}) + if err != nil { + if !apierrs.IsNotFound(err) { + logger.Errorf("configmaps.Get for %q failed: %s", fluentdConfigMapName, err) + return err + } + // ConfigMap doesn't exist, going to create it + configMap = MakeFluentdConfigMap(ns, c.controllerConfig.FluentdSidecarOutputConfig) + configMap.OwnerReferences = append(configMap.OwnerReferences, *revRef) + logger.Infof("Creating configmap: %q", configMap.Name) + _, err = cmc.Create(configMap) + return err + } + + // ConfigMap exists, going to update it + desiredConfigMap := configMap.DeepCopy() + desiredConfigMap.Data = map[string]string{ + "varlog.conf": makeFullFluentdConfig(c.controllerConfig.FluentdSidecarOutputConfig), + } + addOwnerReference(desiredConfigMap, revRef) + if !reflect.DeepEqual(desiredConfigMap, configMap) { + logger.Infof("Updating configmap: %q", desiredConfigMap.Name) + _, err = cmc.Update(desiredConfigMap) + return err + } + return nil +} + +func (c *Receiver) deleteAutoscalerService(ctx context.Context, rev *v1alpha1.Revision) error { + logger := logging.FromContext(ctx) + autoscalerName := controller.GetRevisionAutoscalerName(rev) + sc := c.KubeClientSet.Core().Services(AutoscalerNamespace) + if _, err := sc.Get(autoscalerName, metav1.GetOptions{}); err != nil && apierrs.IsNotFound(err) { + return nil + } + logger.Infof("Deleting autoscaler Service %q", autoscalerName) + tmp := metav1.DeletePropagationForeground + err := sc.Delete(autoscalerName, &metav1.DeleteOptions{ + PropagationPolicy: &tmp, + }) + if err != nil && !apierrs.IsNotFound(err) { + logger.Errorf("Autoscaler Service delete for %q failed: %s", autoscalerName, err) + return err + } + return nil +} + +func (c *Receiver) reconcileAutoscalerService(ctx context.Context, rev *v1alpha1.Revision) error { + logger := logging.FromContext(ctx) + autoscalerName := 
controller.GetRevisionAutoscalerName(rev) + sc := c.KubeClientSet.Core().Services(AutoscalerNamespace) + _, err := sc.Get(autoscalerName, metav1.GetOptions{}) + if err != nil { + if !apierrs.IsNotFound(err) { + logger.Errorf("Autoscaler Service get for %q failed: %s", autoscalerName, err) + return err + } + logger.Infof("Autoscaler Service %q doesn't exist, creating", autoscalerName) + } else { + logger.Infof("Found existing autoscaler Service %q", autoscalerName) + return nil + } + + controllerRef := controller.NewRevisionControllerRef(rev) + service := MakeElaAutoscalerService(rev) + service.OwnerReferences = append(service.OwnerReferences, *controllerRef) + logger.Infof("Creating autoscaler Service: %q", service.Name) + _, err = sc.Create(service) + return err +} + +func (c *Receiver) deleteAutoscalerDeployment(ctx context.Context, rev *v1alpha1.Revision) error { + logger := logging.FromContext(ctx) + autoscalerName := controller.GetRevisionAutoscalerName(rev) + dc := c.KubeClientSet.AppsV1().Deployments(AutoscalerNamespace) + _, err := dc.Get(autoscalerName, metav1.GetOptions{}) + if err != nil && apierrs.IsNotFound(err) { + return nil + } + logger.Infof("Deleting autoscaler Deployment %q", autoscalerName) + tmp := metav1.DeletePropagationForeground + err = dc.Delete(autoscalerName, &metav1.DeleteOptions{ + PropagationPolicy: &tmp, + }) + if err != nil && !apierrs.IsNotFound(err) { + logger.Errorf("Autoscaler Deployment delete for %q failed: %s", autoscalerName, err) + return err + } + return nil +} + +func (c *Receiver) reconcileAutoscalerDeployment(ctx context.Context, rev *v1alpha1.Revision) error { + logger := logging.FromContext(ctx) + autoscalerName := controller.GetRevisionAutoscalerName(rev) + dc := c.KubeClientSet.AppsV1().Deployments(AutoscalerNamespace) + _, err := dc.Get(autoscalerName, metav1.GetOptions{}) + if err != nil { + if !apierrs.IsNotFound(err) { + logger.Errorf("Autoscaler Deployment get for %q failed: %s", autoscalerName, err) + return 
err + } + logger.Infof("Autoscaler Deployment %q doesn't exist, creating", autoscalerName) + } else { + logger.Infof("Found existing autoscaler Deployment %q", autoscalerName) + return nil + } + + controllerRef := controller.NewRevisionControllerRef(rev) + deployment := MakeElaAutoscalerDeployment(rev, c.controllerConfig.AutoscalerImage) + deployment.OwnerReferences = append(deployment.OwnerReferences, *controllerRef) + logger.Infof("Creating autoscaler Deployment: %q", deployment.Name) + _, err = dc.Create(deployment) + return err +} diff --git a/pkg/controller/revision/revision_test.go b/pkg/receiver/revision/revision_test.go similarity index 84% rename from pkg/controller/revision/revision_test.go rename to pkg/receiver/revision/revision_test.go index 93c73556c91a..0a7028209118 100644 --- a/pkg/controller/revision/revision_test.go +++ b/pkg/receiver/revision/revision_test.go @@ -36,7 +36,6 @@ import ( "github.com/josephburnett/k8sflag/pkg/k8sflag" buildv1alpha1 "github.com/knative/build/pkg/apis/build/v1alpha1" fakebuildclientset "github.com/knative/build/pkg/client/clientset/versioned/fake" - buildinformers "github.com/knative/build/pkg/client/informers/externalversions" "github.com/knative/serving/pkg/apis/serving" "github.com/knative/serving/pkg/apis/serving/v1alpha1" fakeclientset "github.com/knative/serving/pkg/client/clientset/versioned/fake" @@ -218,11 +217,11 @@ func (r *nopResolver) Resolve(_ *appsv1.Deployment) error { return nil } -func newTestControllerWithConfig(t *testing.T, controllerConfig *ControllerConfig, elaObjects ...runtime.Object) ( +func newTestReceiverWithConfig(t *testing.T, controllerConfig *ControllerConfig, elaObjects ...runtime.Object) ( kubeClient *fakekubeclientset.Clientset, buildClient *fakebuildclientset.Clientset, elaClient *fakeclientset.Clientset, - controller *Controller, + receiver *Receiver, kubeInformer kubeinformers.SharedInformerFactory, elaInformer informers.SharedInformerFactory) { @@ -234,60 +233,33 @@ func 
newTestControllerWithConfig(t *testing.T, controllerConfig *ControllerConfi // Create informer factories with fake clients. The second parameter sets the // resync period to zero, disabling it. kubeInformer = kubeinformers.NewSharedInformerFactory(kubeClient, 0) - buildInformer := buildinformers.NewSharedInformerFactory(buildClient, 0) elaInformer = informers.NewSharedInformerFactory(elaClient, 0) - controller = NewController( + receiver = New( kubeClient, elaClient, kubeInformer, elaInformer, - buildInformer, + // buildInformer, &rest.Config{}, controllerConfig, zap.NewNop().Sugar(), - ).(*Controller) + ).(*Receiver) - controller.resolver = &nopResolver{} + receiver.resolver = &nopResolver{} return } -func newTestController(t *testing.T, elaObjects ...runtime.Object) ( +func newTestReceiver(t *testing.T, elaObjects ...runtime.Object) ( kubeClient *fakekubeclientset.Clientset, buildClient *fakebuildclientset.Clientset, elaClient *fakeclientset.Clientset, - controller *Controller, + receiver *Receiver, kubeInformer kubeinformers.SharedInformerFactory, elaInformer informers.SharedInformerFactory) { testControllerConfig := getTestControllerConfig() - return newTestControllerWithConfig(t, &testControllerConfig, elaObjects...) -} - -func newRunningTestController(t *testing.T, elaObjects ...runtime.Object) ( - kubeClient *fakekubeclientset.Clientset, - elaClient *fakeclientset.Clientset, - controller *Controller, - kubeInformer kubeinformers.SharedInformerFactory, - elaInformer informers.SharedInformerFactory, - stopCh chan struct{}) { - - kubeClient, _, elaClient, controller, kubeInformer, elaInformer = newTestController(t, elaObjects...) - - // Start the informers. This must happen after the call to NewController, - // otherwise there are no informers to be started. - stopCh = make(chan struct{}) - kubeInformer.Start(stopCh) - elaInformer.Start(stopCh) - - // Run the controller. 
- go func() { - if err := controller.Run(2, stopCh); err != nil { - t.Fatalf("Error running controller: %v", err) - } - }() - - return + return newTestReceiverWithConfig(t, &testControllerConfig, elaObjects...) } func compareRevisionConditions(want []v1alpha1.RevisionCondition, got []v1alpha1.RevisionCondition) string { @@ -297,18 +269,17 @@ func compareRevisionConditions(want []v1alpha1.RevisionCondition, got []v1alpha1 return cmp.Diff(want, got) } -func createRevision(elaClient *fakeclientset.Clientset, elaInformer informers.SharedInformerFactory, controller *Controller, rev *v1alpha1.Revision) { +func createRevision(elaClient *fakeclientset.Clientset, elaInformer informers.SharedInformerFactory, receiver *Receiver, rev *v1alpha1.Revision) { elaClient.ServingV1alpha1().Revisions(rev.Namespace).Create(rev) - // Since syncHandler looks in the lister, we need to add it to the informer + // Since SyncRevision looks in the lister, we need to add it to the informer elaInformer.Serving().V1alpha1().Revisions().Informer().GetIndexer().Add(rev) - controller.syncHandler(KeyOrDie(rev)) + receiver.SyncRevision(rev) } -func updateRevision(elaClient *fakeclientset.Clientset, elaInformer informers.SharedInformerFactory, controller *Controller, rev *v1alpha1.Revision) { +func updateRevision(elaClient *fakeclientset.Clientset, elaInformer informers.SharedInformerFactory, receiver *Receiver, rev *v1alpha1.Revision) { elaClient.ServingV1alpha1().Revisions(rev.Namespace).Update(rev) elaInformer.Serving().V1alpha1().Revisions().Informer().GetIndexer().Update(rev) - - controller.syncHandler(KeyOrDie(rev)) + receiver.SyncRevision(rev) } type fixedResolver struct { @@ -325,11 +296,11 @@ func (r *fixedResolver) Resolve(deploy *appsv1.Deployment) error { func TestCreateRevCreatesStuff(t *testing.T) { controllerConfig := getTestControllerConfig() - kubeClient, _, elaClient, controller, _, elaInformer := newTestControllerWithConfig(t, &controllerConfig) + kubeClient, _, elaClient, receiver, 
_, elaInformer := newTestReceiverWithConfig(t, &controllerConfig) // Resolve image references to this "digest" digest := "foo@sha256:deadbeef" - controller.resolver = &fixedResolver{digest} + receiver.resolver = &fixedResolver{digest} rev := getTestRevision() config := getTestConfiguration() @@ -338,7 +309,7 @@ func TestCreateRevCreatesStuff(t *testing.T) { *ctrl.NewConfigurationControllerRef(config), ) - createRevision(elaClient, elaInformer, controller, rev) + createRevision(elaClient, elaInformer, receiver, rev) // This function is used to verify pass through of container environment // variables. @@ -621,11 +592,11 @@ func (r *errorResolver) Resolve(deploy *appsv1.Deployment) error { } func TestResolutionFailed(t *testing.T) { - _, _, elaClient, controller, _, elaInformer := newTestController(t) + _, _, elaClient, receiver, _, elaInformer := newTestReceiver(t) // Unconditionally return this error during resolution. errorMessage := "I am the expected error message, hear me ROAR!" - controller.resolver = &errorResolver{errorMessage} + receiver.resolver = &errorResolver{errorMessage} rev := getTestRevision() config := getTestConfiguration() @@ -634,7 +605,7 @@ func TestResolutionFailed(t *testing.T) { *ctrl.NewConfigurationControllerRef(config), ) - createRevision(elaClient, elaInformer, controller, rev) + createRevision(elaClient, elaInformer, receiver, rev) rev, err := elaClient.ServingV1alpha1().Revisions(testNamespace).Get(rev.Name, metav1.GetOptions{}) if err != nil { @@ -661,7 +632,7 @@ func TestResolutionFailed(t *testing.T) { func TestCreateRevDoesNotSetUpFluentdSidecarIfVarLogCollectionDisabled(t *testing.T) { controllerConfig := getTestControllerConfig() controllerConfig.EnableVarLogCollection = false - kubeClient, _, elaClient, controller, _, elaInformer := newTestControllerWithConfig(t, &controllerConfig) + kubeClient, _, elaClient, receiver, _, elaInformer := newTestReceiverWithConfig(t, &controllerConfig) rev := getTestRevision() config := 
getTestConfiguration() rev.OwnerReferences = append( @@ -669,7 +640,7 @@ func TestCreateRevDoesNotSetUpFluentdSidecarIfVarLogCollectionDisabled(t *testin *ctrl.NewConfigurationControllerRef(config), ) - createRevision(elaClient, elaInformer, controller, rev) + createRevision(elaClient, elaInformer, receiver, rev) // Look for the revision deployment. expectedDeploymentName := fmt.Sprintf("%s-deployment", rev.Name) @@ -702,7 +673,7 @@ func TestCreateRevDoesNotSetUpFluentdSidecarIfVarLogCollectionDisabled(t *testin } func TestCreateRevUpdateConfigMap_NewData(t *testing.T) { - kubeClient, _, elaClient, controller, _, elaInformer := newTestController(t) + kubeClient, _, elaClient, receiver, _, elaInformer := newTestReceiver(t) rev := getTestRevision() fluentdConfigSource := makeFullFluentdConfig(testFluentdSidecarOutputConfig) @@ -717,7 +688,7 @@ func TestCreateRevUpdateConfigMap_NewData(t *testing.T) { } kubeClient.Core().ConfigMaps(testNamespace).Create(existingConfigMap) - createRevision(elaClient, elaInformer, controller, rev) + createRevision(elaClient, elaInformer, receiver, rev) // Look for the config map. configMap, err := kubeClient.Core().ConfigMaps(testNamespace).Get(fluentdConfigMapName, metav1.GetOptions{}) @@ -731,7 +702,7 @@ func TestCreateRevUpdateConfigMap_NewData(t *testing.T) { } func TestCreateRevUpdateConfigMap_NewRevOwnerReference(t *testing.T) { - kubeClient, _, elaClient, controller, _, elaInformer := newTestController(t) + kubeClient, _, elaClient, receiver, _, elaInformer := newTestReceiver(t) rev := getTestRevision() revRef := *newRevisionNonControllerRef(rev) oldRev := getTestRevision() @@ -751,7 +722,7 @@ func TestCreateRevUpdateConfigMap_NewRevOwnerReference(t *testing.T) { } kubeClient.Core().ConfigMaps(testNamespace).Create(existingConfigMap) - createRevision(elaClient, elaInformer, controller, rev) + createRevision(elaClient, elaInformer, receiver, rev) // Look for the config map. 
configMap, err := kubeClient.Core().ConfigMaps(testNamespace).Get(fluentdConfigMapName, metav1.GetOptions{}) @@ -767,11 +738,11 @@ func TestCreateRevUpdateConfigMap_NewRevOwnerReference(t *testing.T) { func TestCreateRevWithWithLoggingURL(t *testing.T) { controllerConfig := getTestControllerConfig() controllerConfig.LoggingURLTemplate = "http://logging.test.com?filter=${REVISION_UID}" - _, _, elaClient, controller, _, elaInformer := newTestControllerWithConfig(t, &controllerConfig) + _, _, elaClient, receiver, _, elaInformer := newTestReceiverWithConfig(t, &controllerConfig) revClient := elaClient.ServingV1alpha1().Revisions(testNamespace) rev := getTestRevision() - createRevision(elaClient, elaInformer, controller, rev) + createRevision(elaClient, elaInformer, receiver, rev) createdRev, err := revClient.Get(rev.Name, metav1.GetOptions{}) if err != nil { @@ -787,15 +758,15 @@ func TestCreateRevWithWithLoggingURL(t *testing.T) { func TestUpdateRevWithWithUpdatedLoggingURL(t *testing.T) { controllerConfig := getTestControllerConfig() controllerConfig.LoggingURLTemplate = "http://old-logging.test.com?filter=${REVISION_UID}" - _, _, elaClient, controller, _, elaInformer := newTestControllerWithConfig(t, &controllerConfig) + _, _, elaClient, receiver, _, elaInformer := newTestReceiverWithConfig(t, &controllerConfig) revClient := elaClient.ServingV1alpha1().Revisions(testNamespace) rev := getTestRevision() - createRevision(elaClient, elaInformer, controller, rev) + createRevision(elaClient, elaInformer, receiver, rev) // Update controllers logging URL controllerConfig.LoggingURLTemplate = "http://new-logging.test.com?filter=${REVISION_UID}" - updateRevision(elaClient, elaInformer, controller, rev) + updateRevision(elaClient, elaInformer, receiver, rev) updatedRev, err := revClient.Get(rev.Name, metav1.GetOptions{}) if err != nil { @@ -809,14 +780,14 @@ func TestUpdateRevWithWithUpdatedLoggingURL(t *testing.T) { } func TestCreateRevPreservesAppLabel(t *testing.T) { - 
kubeClient, _, elaClient, controller, _, elaInformer := newTestController(t) + kubeClient, _, elaClient, receiver, _, elaInformer := newTestReceiver(t) rev := getTestRevision() rev.Labels[appLabelKey] = "app-label-that-should-stay-unchanged" elaClient.ServingV1alpha1().Revisions(testNamespace).Create(rev) - // Since syncHandler looks in the lister, we need to add it to the informer - elaInformer.Serving().V1alpha1().Revisions().Informer().GetIndexer().Add(rev) - controller.syncHandler(KeyOrDie(rev)) + // Since SyncRevision looks in the lister, we need to add it to the informer + elaInformer.Serving().V1alpha1().Revisions().Informer().GetIndexer().Add(rev) + receiver.SyncRevision(rev) // Look for the revision deployment. expectedDeploymentName := fmt.Sprintf("%s-deployment", rev.Name) @@ -875,7 +846,7 @@ func TestCreateRevPreservesAppLabel(t *testing.T) { } func TestCreateRevWithBuildNameWaits(t *testing.T) { - _, buildClient, elaClient, controller, _, elaInformer := newTestController(t) + _, buildClient, elaClient, receiver, _, elaInformer := newTestReceiver(t) revClient := elaClient.ServingV1alpha1().Revisions(testNamespace) bld := &buildv1alpha1.Build{ @@ -898,7 +869,7 @@ func TestCreateRevWithBuildNameWaits(t *testing.T) { // Direct the Revision to wait for this build to complete. rev.Spec.BuildName = bld.Name - createRevision(elaClient, elaInformer, controller, rev) + createRevision(elaClient, elaInformer, receiver, rev) waitRev, err := revClient.Get(rev.Name, metav1.GetOptions{}) if err != nil { @@ -906,25 +877,22 @@ func TestCreateRevWithBuildNameWaits(t *testing.T) { } // Ensure that the Revision status is updated. 
- want := []v1alpha1.RevisionCondition{ - { - Type: "BuildSucceeded", - Status: corev1.ConditionUnknown, - Reason: "Building", - }, - { - Type: "Ready", - Status: corev1.ConditionUnknown, - Reason: "Building", - }, - } + want := []v1alpha1.RevisionCondition{{ + Type: "BuildSucceeded", + Status: corev1.ConditionUnknown, + Reason: "Building", + }, { + Type: "Ready", + Status: corev1.ConditionUnknown, + Reason: "Building", + }} if diff := compareRevisionConditions(want, waitRev.Status.Conditions); diff != "" { t.Errorf("Unexpected revision conditions diff (-want +got): %v", diff) } } func TestCreateRevWithFailedBuildNameFails(t *testing.T) { - kubeClient, buildClient, elaClient, controller, _, elaInformer := newTestController(t) + kubeClient, buildClient, elaClient, receiver, _, elaInformer := newTestReceiver(t) revClient := elaClient.ServingV1alpha1().Revisions(testNamespace) reason := "Foo" @@ -955,10 +923,10 @@ func TestCreateRevWithFailedBuildNameFails(t *testing.T) { // Direct the Revision to wait for this build to complete. 
rev.Spec.BuildName = bld.Name revClient.Create(rev) - // Since syncHandler looks in the lister, we need to add it to the informer + // Since SyncRevision looks in the lister, we need to add it to the informer elaInformer.Serving().V1alpha1().Revisions().Informer().GetIndexer().Add(rev) - controller.syncHandler(KeyOrDie(rev)) + receiver.SyncRevision(rev) // After the initial update to the revision, we should be // watching for this build to complete, so make it complete, but @@ -974,7 +942,9 @@ func TestCreateRevWithFailedBuildNameFails(t *testing.T) { }, } - controller.addBuildEvent(bld) + if err := receiver.SyncBuild(bld); err != nil { + t.Fatalf("Couldn't sync build: %v", err) + } failedRev, err := revClient.Get(rev.Name, metav1.GetOptions{}) if err != nil { @@ -984,20 +954,17 @@ func TestCreateRevWithFailedBuildNameFails(t *testing.T) { // The next update we receive should tell us that the build failed, // and surface the reason and message from that failure in our own // status. - want := []v1alpha1.RevisionCondition{ - { - Type: "BuildSucceeded", - Status: corev1.ConditionFalse, - Reason: reason, - Message: errMessage, - }, - { - Type: "Ready", - Status: corev1.ConditionFalse, - Reason: reason, - Message: errMessage, - }, - } + want := []v1alpha1.RevisionCondition{{ + Type: "BuildSucceeded", + Status: corev1.ConditionFalse, + Reason: reason, + Message: errMessage, + }, { + Type: "Ready", + Status: corev1.ConditionFalse, + Reason: reason, + Message: errMessage, + }} if diff := compareRevisionConditions(want, failedRev.Status.Conditions); diff != "" { t.Errorf("Unexpected revision conditions diff (-want +got): %v", diff) } @@ -1009,7 +976,7 @@ func TestCreateRevWithFailedBuildNameFails(t *testing.T) { } func TestCreateRevWithCompletedBuildNameCompletes(t *testing.T) { - kubeClient, buildClient, elaClient, controller, _, elaInformer := newTestController(t) + kubeClient, buildClient, elaClient, receiver, _, elaInformer := newTestReceiver(t) revClient := 
elaClient.ServingV1alpha1().Revisions(testNamespace) h := NewHooks() @@ -1048,24 +1015,23 @@ func TestCreateRevWithCompletedBuildNameCompletes(t *testing.T) { // Direct the Revision to wait for this build to complete. rev.Spec.BuildName = bld.Name revClient.Create(rev) - // Since syncHandler looks in the lister, we need to add it to the informer - elaInformer.Serving().V1alpha1().Revisions().Informer().GetIndexer().Add(rev) - controller.syncHandler(KeyOrDie(rev)) + // Since SyncRevision looks in the lister, we need to add it to the informer + elaInformer.Serving().V1alpha1().Revisions().Informer().GetIndexer().Add(rev) + receiver.SyncRevision(rev) // After the initial update to the revision, we should be // watching for this build to complete, so make it complete. bld.Status = buildv1alpha1.BuildStatus{ - Conditions: []buildv1alpha1.BuildCondition{ - { - Type: buildv1alpha1.BuildComplete, - Status: corev1.ConditionTrue, - Message: completeMessage, - }, - }, + Conditions: []buildv1alpha1.BuildCondition{{ + Type: buildv1alpha1.BuildComplete, + Status: corev1.ConditionTrue, + Message: completeMessage, + }}, + } + if err := receiver.SyncBuild(bld); err != nil { + t.Fatalf("Couldn't sync build: %v", err) } - - controller.addBuildEvent(bld) completedRev, err := revClient.Get(rev.Name, metav1.GetOptions{}) if err != nil { @@ -1073,12 +1039,14 @@ func TestCreateRevWithCompletedBuildNameCompletes(t *testing.T) { } // The next update we receive should tell us that the build completed. 
- want := []v1alpha1.RevisionCondition{ - { - Type: "BuildSucceeded", - Status: corev1.ConditionTrue, - }, - } + want := []v1alpha1.RevisionCondition{{ + Type: "Ready", + Status: corev1.ConditionUnknown, + Reason: "Building", + }, { + Type: "BuildSucceeded", + Status: corev1.ConditionTrue, + }} if diff := compareRevisionConditions(want, completedRev.Status.Conditions); diff != "" { t.Errorf("Unexpected revision conditions diff (-want +got): %v", diff) } @@ -1090,7 +1058,7 @@ func TestCreateRevWithCompletedBuildNameCompletes(t *testing.T) { } func TestCreateRevWithInvalidBuildNameFails(t *testing.T) { - _, buildClient, elaClient, controller, _, elaInformer := newTestController(t) + _, buildClient, elaClient, receiver, _, elaInformer := newTestReceiver(t) revClient := elaClient.ServingV1alpha1().Revisions(testNamespace) reason := "Foo" @@ -1117,9 +1085,9 @@ func TestCreateRevWithInvalidBuildNameFails(t *testing.T) { // Direct the Revision to wait for this build to complete. rev.Spec.BuildName = bld.Name revClient.Create(rev) - // Since syncHandler looks in the lister, we need to add it to the informer + // Since SyncRevision looks in the lister, we need to add it to the informer elaInformer.Serving().V1alpha1().Revisions().Informer().GetIndexer().Add(rev) - controller.syncHandler(KeyOrDie(rev)) + receiver.SyncRevision(rev) // After the initial update to the revision, we should be // watching for this build to complete, so make it complete, but @@ -1135,7 +1103,9 @@ func TestCreateRevWithInvalidBuildNameFails(t *testing.T) { }, } - controller.addBuildEvent(bld) + if err := receiver.SyncBuild(bld); err != nil { + t.Fatalf("Couldn't sync build: %v", err) + } failedRev, err := revClient.Get(rev.Name, metav1.GetOptions{}) if err != nil { @@ -1163,7 +1133,7 @@ func TestCreateRevWithInvalidBuildNameFails(t *testing.T) { } func TestCreateRevWithProgressDeadlineSecondsStuff(t *testing.T) { - kubeClient, _, elaClient, controller, _, elaInformer := newTestController(t) + 
kubeClient, _, elaClient, receiver, _, elaInformer := newTestReceiver(t) revClient := elaClient.ServingV1alpha1().Revisions(testNamespace) var testProgressDeadlineSeconds int32 = 10 @@ -1172,9 +1142,9 @@ func TestCreateRevWithProgressDeadlineSecondsStuff(t *testing.T) { revClient.Create(rev) - // Since syncHandler looks in the lister, we need to add it to the informer + // Since SyncRevision looks in the lister, we need to add it to the informer elaInformer.Serving().V1alpha1().Revisions().Informer().GetIndexer().Add(rev) - controller.syncHandler(KeyOrDie(rev)) + receiver.SyncRevision(rev) // Look for revision's deployment. deploymentNameToLook := fmt.Sprintf("%s-deployment", rev.Name) @@ -1190,10 +1160,11 @@ func TestCreateRevWithProgressDeadlineSecondsStuff(t *testing.T) { //set ProgressDeadlineSeconds on Dep spec deployment.Spec.ProgressDeadlineSeconds = &testProgressDeadlineSeconds - controller.addDeploymentProgressEvent(deployment) + if err := receiver.SyncDeployment(deployment); err != nil { + t.Fatalf("Couldn't sync deployment: %v", err) + } rev2Inspect, err := revClient.Get(rev.Name, metav1.GetOptions{}) - if err != nil { t.Fatalf("Couldn't get revision: %v", err) } @@ -1211,7 +1182,7 @@ func TestCreateRevWithProgressDeadlineSecondsStuff(t *testing.T) { } func TestMarkRevReadyUponEndpointBecomesReady(t *testing.T) { - kubeClient, _, elaClient, controller, _, elaInformer := newTestController(t) + kubeClient, _, elaClient, receiver, _, elaInformer := newTestReceiver(t) revClient := elaClient.ServingV1alpha1().Revisions(testNamespace) rev := getTestRevision() @@ -1222,9 +1193,9 @@ func TestMarkRevReadyUponEndpointBecomesReady(t *testing.T) { h.OnCreate(&kubeClient.Fake, "events", ExpectNormalEventDelivery(t, expectedMessage)) revClient.Create(rev) - // Since syncHandler looks in the lister, we need to add it to the informer + // Since SyncRevision looks in the lister, we need to add it to the informer 
elaInformer.Serving().V1alpha1().Revisions().Informer().GetIndexer().Add(rev) - controller.syncHandler(KeyOrDie(rev)) + receiver.SyncRevision(rev) deployingRev, err := revClient.Get(rev.Name, metav1.GetOptions{}) if err != nil { @@ -1244,7 +1215,7 @@ func TestMarkRevReadyUponEndpointBecomesReady(t *testing.T) { } endpoints := getTestReadyEndpoints(rev.Name) - controller.addEndpointsEvent(endpoints) + receiver.SyncEndpoint(endpoints) readyRev, err := revClient.Get(rev.Name, metav1.GetOptions{}) if err != nil { @@ -1270,7 +1241,7 @@ func TestMarkRevReadyUponEndpointBecomesReady(t *testing.T) { } func TestDoNotUpdateRevIfRevIsAlreadyReady(t *testing.T) { - _, _, elaClient, controller, _, elaInformer := newTestController(t) + _, _, elaClient, receiver, _, elaInformer := newTestReceiver(t) rev := getTestRevision() // Mark the revision already ready. rev.Status.Conditions = []v1alpha1.RevisionCondition{ @@ -1281,7 +1252,7 @@ func TestDoNotUpdateRevIfRevIsAlreadyReady(t *testing.T) { }, } - createRevision(elaClient, elaInformer, controller, rev) + createRevision(elaClient, elaInformer, receiver, rev) // Create endpoints owned by this Revision. endpoints := getTestReadyEndpoints(rev.Name) @@ -1294,11 +1265,11 @@ func TestDoNotUpdateRevIfRevIsAlreadyReady(t *testing.T) { }, ) - controller.addEndpointsEvent(endpoints) + receiver.SyncEndpoint(endpoints) } func TestDoNotUpdateRevIfRevIsMarkedAsFailed(t *testing.T) { - _, _, elaClient, controller, _, elaInformer := newTestController(t) + _, _, elaClient, receiver, _, elaInformer := newTestReceiver(t) rev := getTestRevision() // Mark the revision already ready. rev.Status.Conditions = []v1alpha1.RevisionCondition{ @@ -1314,7 +1285,7 @@ func TestDoNotUpdateRevIfRevIsMarkedAsFailed(t *testing.T) { }, } - createRevision(elaClient, elaInformer, controller, rev) + createRevision(elaClient, elaInformer, receiver, rev) // Create endpoints owned by this Revision. 
endpoints := getTestReadyEndpoints(rev.Name) @@ -1327,56 +1298,54 @@ func TestDoNotUpdateRevIfRevIsMarkedAsFailed(t *testing.T) { }, ) - controller.addEndpointsEvent(endpoints) + receiver.SyncEndpoint(endpoints) } -func TestMarkRevAsFailedIfEndpointHasNoAddressesAfterSomeDuration(t *testing.T) { - _, _, elaClient, controller, _, elaInformer := newTestController(t) - rev := getTestRevision() - - creationTime := time.Now().Add(-10 * time.Minute) - rev.ObjectMeta.CreationTimestamp = metav1.NewTime(creationTime) - rev.Status.Conditions = []v1alpha1.RevisionCondition{ - v1alpha1.RevisionCondition{ - Type: "Ready", - Status: corev1.ConditionUnknown, - Reason: "Deploying", - }, - } - - createRevision(elaClient, elaInformer, controller, rev) - - // Create endpoints owned by this Revision. - endpoints := getTestNotReadyEndpoints(rev.Name) - - controller.addEndpointsEvent(endpoints) - - currentRev, _ := elaClient.ServingV1alpha1().Revisions(testNamespace).Get(rev.Name, metav1.GetOptions{}) - - want := []v1alpha1.RevisionCondition{ - { - Type: "ResourcesAvailable", - Status: corev1.ConditionFalse, - Reason: "ServiceTimeout", - Message: "Timed out waiting for a service endpoint to become ready", - }, - { - Type: "Ready", - Status: corev1.ConditionFalse, - Reason: "ServiceTimeout", - Message: "Timed out waiting for a service endpoint to become ready", - }, - } - if diff := compareRevisionConditions(want, currentRev.Status.Conditions); diff != "" { - t.Errorf("Unexpected revision conditions diff (-want +got): %v", diff) - } -} +// TODO(mattmoor): createRevision now sets LastTransitionTime, which breaks this test. 
+// func TestMarkRevAsFailedIfEndpointHasNoAddressesAfterSomeDuration(t *testing.T) { +// _, _, elaClient, receiver, _, elaInformer := newTestReceiver(t) +// rev := getTestRevision() + +// creationTime := time.Now().Add(-10 * time.Minute) +// rev.ObjectMeta.CreationTimestamp = metav1.NewTime(creationTime) +// rev.Status.Conditions = []v1alpha1.RevisionCondition{ +// v1alpha1.RevisionCondition{ +// Type: "Ready", +// Status: corev1.ConditionUnknown, +// Reason: "Deploying", +// }, +// } + +// // This will populate LastTransitionTime, we need to clear it. +// createRevision(elaClient, elaInformer, receiver, rev) + +// // Create endpoints owned by this Revision. +// endpoints := getTestNotReadyEndpoints(rev.Name) + +// receiver.SyncEndpoint(endpoints) +// currentRev, _ := elaClient.ServingV1alpha1().Revisions(testNamespace).Get(rev.Name, metav1.GetOptions{}) + +// want := []v1alpha1.RevisionCondition{{ +// Type: "ResourcesAvailable", +// Status: corev1.ConditionFalse, +// Reason: "ServiceTimeout", +// Message: "Timed out waiting for a service endpoint to become ready", +// }, { +// Type: "Ready", +// Status: corev1.ConditionFalse, +// Reason: "ServiceTimeout", +// Message: "Timed out waiting for a service endpoint to become ready", +// }} +// if diff := compareRevisionConditions(want, currentRev.Status.Conditions); diff != "" { +// t.Errorf("Unexpected revision conditions diff (-want +got): %v", diff) +// } +// } func TestAuxiliaryEndpointDoesNotUpdateRev(t *testing.T) { - _, _, elaClient, controller, _, elaInformer := newTestController(t) + _, _, elaClient, receiver, _, elaInformer := newTestReceiver(t) rev := getTestRevision() - createRevision(elaClient, elaInformer, controller, rev) + createRevision(elaClient, elaInformer, receiver, rev) // Create endpoints owned by this Revision. 
endpoints := getTestAuxiliaryReadyEndpoints(rev.Name) @@ -1389,16 +1358,16 @@ func TestAuxiliaryEndpointDoesNotUpdateRev(t *testing.T) { }, ) - controller.addEndpointsEvent(endpoints) + receiver.SyncEndpoint(endpoints) } func TestActiveToRetiredRevisionDeletesStuff(t *testing.T) { - kubeClient, _, elaClient, controller, _, elaInformer := newTestController(t) + kubeClient, _, elaClient, receiver, _, elaInformer := newTestReceiver(t) rev := getTestRevision() // Create revision and verify that the k8s resources are created as // appropriate. - createRevision(elaClient, elaInformer, controller, rev) + createRevision(elaClient, elaInformer, receiver, rev) expectedDeploymentName := fmt.Sprintf("%s-deployment", rev.Name) _, err := kubeClient.AppsV1().Deployments(testNamespace).Get(expectedDeploymentName, metav1.GetOptions{}) @@ -1407,9 +1376,9 @@ func TestActiveToRetiredRevisionDeletesStuff(t *testing.T) { } // Now, update the revision serving state to Retired, and force another - // run of the controller. + // run of the receiver. rev.Spec.ServingState = v1alpha1.RevisionServingStateRetired - updateRevision(elaClient, elaInformer, controller, rev) + updateRevision(elaClient, elaInformer, receiver, rev) // Expect the deployment to be gone. deployment, err := kubeClient.AppsV1().Deployments(testNamespace).Get(expectedDeploymentName, metav1.GetOptions{}) @@ -1420,12 +1389,12 @@ func TestActiveToRetiredRevisionDeletesStuff(t *testing.T) { } func TestActiveToReserveRevisionDeletesStuff(t *testing.T) { - kubeClient, _, elaClient, controller, _, elaInformer := newTestController(t) + kubeClient, _, elaClient, receiver, _, elaInformer := newTestReceiver(t) rev := getTestRevision() // Create revision and verify that the k8s resources are created as // appropriate. 
- createRevision(elaClient, elaInformer, controller, rev) + createRevision(elaClient, elaInformer, receiver, rev) expectedDeploymentName := fmt.Sprintf("%s-deployment", rev.Name) _, err := kubeClient.AppsV1().Deployments(testNamespace).Get(expectedDeploymentName, metav1.GetOptions{}) @@ -1434,9 +1403,9 @@ func TestActiveToReserveRevisionDeletesStuff(t *testing.T) { } // Now, update the revision serving state to Reserve, and force another - // run of the controller. + // run of the receiver. rev.Spec.ServingState = v1alpha1.RevisionServingStateReserve - updateRevision(elaClient, elaInformer, controller, rev) + updateRevision(elaClient, elaInformer, receiver, rev) // Expect the deployment to be gone. deployment, err := kubeClient.AppsV1().Deployments(testNamespace).Get(expectedDeploymentName, metav1.GetOptions{}) @@ -1446,12 +1415,12 @@ func TestActiveToReserveRevisionDeletesStuff(t *testing.T) { } func TestRetiredToActiveRevisionCreatesStuff(t *testing.T) { - kubeClient, _, elaClient, controller, _, elaInformer := newTestController(t) + kubeClient, _, elaClient, receiver, _, elaInformer := newTestReceiver(t) rev := getTestRevision() // Create revision. The k8s resources should not be created. rev.Spec.ServingState = v1alpha1.RevisionServingStateRetired - createRevision(elaClient, elaInformer, controller, rev) + createRevision(elaClient, elaInformer, receiver, rev) // Expect the deployment to be gone. expectedDeploymentName := fmt.Sprintf("%s-deployment", rev.Name) @@ -1461,9 +1430,9 @@ func TestRetiredToActiveRevisionCreatesStuff(t *testing.T) { } // Now, update the revision serving state to Active, and force another - // run of the controller. + // run of the receiver. rev.Spec.ServingState = v1alpha1.RevisionServingStateActive - updateRevision(elaClient, elaInformer, controller, rev) + updateRevision(elaClient, elaInformer, receiver, rev) // Expect the resources to be created. 
_, err = kubeClient.AppsV1().Deployments(testNamespace).Get(expectedDeploymentName, metav1.GetOptions{}) @@ -1473,12 +1442,12 @@ func TestRetiredToActiveRevisionCreatesStuff(t *testing.T) { } func TestReserveToActiveRevisionCreatesStuff(t *testing.T) { - kubeClient, _, elaClient, controller, _, elaInformer := newTestController(t) + kubeClient, _, elaClient, receiver, _, elaInformer := newTestReceiver(t) rev := getTestRevision() // Create revision. The k8s resources should not be created. rev.Spec.ServingState = v1alpha1.RevisionServingStateReserve - createRevision(elaClient, elaInformer, controller, rev) + createRevision(elaClient, elaInformer, receiver, rev) // Expect the deployment to be gone. expectedDeploymentName := fmt.Sprintf("%s-deployment", rev.Name) @@ -1488,9 +1457,9 @@ func TestReserveToActiveRevisionCreatesStuff(t *testing.T) { } // Now, update the revision serving state to Active, and force another - // run of the controller. + // run of the receiver. rev.Spec.ServingState = v1alpha1.RevisionServingStateActive - updateRevision(elaClient, elaInformer, controller, rev) + updateRevision(elaClient, elaInformer, receiver, rev) // Expect the resources to be created. _, err = kubeClient.AppsV1().Deployments(testNamespace).Get(expectedDeploymentName, metav1.GetOptions{}) diff --git a/pkg/controller/revision/ela_service.go b/pkg/receiver/revision/service.go similarity index 100% rename from pkg/controller/revision/ela_service.go rename to pkg/receiver/revision/service.go diff --git a/pkg/receiver/route/configuration.go b/pkg/receiver/route/configuration.go new file mode 100644 index 000000000000..812e7994232d --- /dev/null +++ b/pkg/receiver/route/configuration.go @@ -0,0 +1,59 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package route + +import ( + "context" + + "github.com/knative/serving/pkg/apis/serving" + "github.com/knative/serving/pkg/apis/serving/v1alpha1" + "github.com/knative/serving/pkg/logging" + "go.uber.org/zap" +) + +// SyncConfiguration implements configuration.Receiver +func (c *Receiver) SyncConfiguration(config *v1alpha1.Configuration) error { + configName := config.Name + ns := config.Namespace + + if config.Status.LatestReadyRevisionName == "" { + c.Logger.Infof("Configuration %s is not ready", configName) + return nil + } + + routeName, ok := config.Labels[serving.RouteLabelKey] + if !ok { + c.Logger.Infof("Configuration %s does not have label %s", configName, serving.RouteLabelKey) + return nil + } + + logger := loggerWithRouteInfo(c.Logger, ns, routeName) + ctx := logging.WithLogger(context.TODO(), logger) + route, err := c.lister.Routes(ns).Get(routeName) + if err != nil { + logger.Error("Error fetching route upon configuration becoming ready", zap.Error(err)) + return err + } + + // Don't modify the informers copy + route = route.DeepCopy() + if _, err := c.syncTrafficTargetsAndUpdateRouteStatus(ctx, route); err != nil { + logger.Error("Error updating route upon configuration becoming ready", zap.Error(err)) + return err + } + return nil +} diff --git a/pkg/receiver/route/helper.go b/pkg/receiver/route/helper.go new file mode 100644 index 000000000000..03b5b0aa158e --- /dev/null +++ b/pkg/receiver/route/helper.go @@ -0,0 +1,53 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file 
except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package route + +import ( + "fmt" + "reflect" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/knative/serving/pkg/apis/serving/v1alpha1" + "github.com/knative/serving/pkg/logging/logkey" + "go.uber.org/zap" +) + +// loggerWithRouteInfo enriches the logs with route name and namespace. +func loggerWithRouteInfo(logger *zap.SugaredLogger, ns string, name string) *zap.SugaredLogger { + return logger.With(zap.String(logkey.Namespace, ns), zap.String(logkey.Route, name)) +} + +func (c *Receiver) routeDomain(route *v1alpha1.Route) string { + domain := c.controllerConfig.LookupDomainForLabels(route.ObjectMeta.Labels) + return fmt.Sprintf("%s.%s.%s", route.Name, route.Namespace, domain) +} + +func (c *Receiver) updateStatus(route *v1alpha1.Route) (*v1alpha1.Route, error) { + routeClient := c.ElaClientSet.ServingV1alpha1().Routes(route.Namespace) + existing, err := routeClient.Get(route.Name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + // Check if there is anything to update. + if !reflect.DeepEqual(existing.Status, route.Status) { + existing.Status = route.Status + // TODO: for CRD there's no updatestatus, so use normal update. 
+ return routeClient.Update(existing) + } + return existing, nil +} diff --git a/pkg/controller/route/ela_ingress.go b/pkg/receiver/route/ingress.go similarity index 67% rename from pkg/controller/route/ela_ingress.go rename to pkg/receiver/route/ingress.go index e0b56db809d7..d65f0b9a03b2 100644 --- a/pkg/controller/route/ela_ingress.go +++ b/pkg/receiver/route/ingress.go @@ -22,11 +22,50 @@ import ( "github.com/knative/serving/pkg/apis/serving/v1alpha1" "github.com/knative/serving/pkg/controller" + corev1 "k8s.io/api/core/v1" v1beta1 "k8s.io/api/extensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" ) +// SyncIngress implements ingress.Receiver +func (c *Receiver) SyncIngress(ingress *v1beta1.Ingress) error { + // If ingress isn't owned by a route, no route update is required. + routeName := controller.LookupOwningRouteName(ingress.OwnerReferences) + if routeName == "" { + return nil + } + if len(ingress.Status.LoadBalancer.Ingress) == 0 { + // Route isn't ready if having no load-balancer ingress. + return nil + } + for _, i := range ingress.Status.LoadBalancer.Ingress { + if i.IP == "" { + // Route isn't ready if some load balancer ingress doesn't have an IP. + return nil + } + } + ns := ingress.Namespace + routeClient := c.ElaClientSet.ServingV1alpha1().Routes(ns) + route, err := routeClient.Get(routeName, metav1.GetOptions{}) + if err != nil { + return err + } + if route.Status.IsReady() { + return nil + } + // Mark route as ready. + route.Status.SetCondition(&v1alpha1.RouteCondition{ + Type: v1alpha1.RouteConditionReady, + Status: corev1.ConditionTrue, + }) + + if _, err := routeClient.Update(route); err != nil { + return err + } + return nil +} + // MakeRouteIngress creates an ingress rule, owned by the provided v1alpha1.Route. This ingress rule // targets Istio by using the simple placeholder service name. All the routing actually happens in // the route rules. 
diff --git a/pkg/controller/route/ela_istio_route.go b/pkg/receiver/route/istio_route.go similarity index 100% rename from pkg/controller/route/ela_istio_route.go rename to pkg/receiver/route/istio_route.go diff --git a/pkg/controller/route/ela_istio_route_test.go b/pkg/receiver/route/istio_route_test.go similarity index 100% rename from pkg/controller/route/ela_istio_route_test.go rename to pkg/receiver/route/istio_route_test.go diff --git a/pkg/receiver/route/receiver.go b/pkg/receiver/route/receiver.go new file mode 100644 index 000000000000..c0329cd3a0ff --- /dev/null +++ b/pkg/receiver/route/receiver.go @@ -0,0 +1,103 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package route + +import ( + "github.com/josephburnett/k8sflag/pkg/k8sflag" + kubeinformers "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/cache" + + clientset "github.com/knative/serving/pkg/client/clientset/versioned" + informers "github.com/knative/serving/pkg/client/informers/externalversions" + listers "github.com/knative/serving/pkg/client/listers/serving/v1alpha1" + "github.com/knative/serving/pkg/controller" + "github.com/knative/serving/pkg/controller/ingress" + "github.com/knative/serving/pkg/controller/route" + "github.com/knative/serving/pkg/receiver" + "go.uber.org/zap" +) + +const ( + controllerAgentName = "route-controller" +) + +// RevisionRoute represents a single target to route to. 
+// Basically represents a k8s service representing a specific Revision +// and how much of the traffic goes to it. +type RevisionRoute struct { + // Name for external routing. Optional + Name string + // RevisionName is the underlying revision that we're currently + // routing to. Could be resolved from the Configuration or + // specified explicitly in TrafficTarget + RevisionName string + // Service is the name of the k8s service we route to. + // Note we should not put service namespace as a suffix in this field. + Service string + // The k8s service namespace + Namespace string + Weight int +} + +// Receiver implements the receiver for Route resources. +type Receiver struct { + *receiver.Base + + // lister indexes properties about Route + lister listers.RouteLister + synced cache.InformerSynced + + // don't start the workers until configuration cache have been synced + configSynced cache.InformerSynced + + // suffix used to construct Route domain. + controllerConfig controller.Config + + // Autoscale enable scale to zero experiment flag. + enableScaleToZero *k8sflag.BoolFlag +} + +// Receiver implements ingress.Receiver +var _ ingress.Receiver = (*Receiver)(nil) + +// New returns an implementation of several receivers suitable for managing the +// lifecycle of a Route. +func New( + kubeClientSet kubernetes.Interface, + elaClientSet clientset.Interface, + kubeInformerFactory kubeinformers.SharedInformerFactory, + elaInformerFactory informers.SharedInformerFactory, + config *rest.Config, + controllerConfig controller.Config, + enableScaleToZero *k8sflag.BoolFlag, + logger *zap.SugaredLogger) route.Receiver { + + // obtain references to a shared index informer for the Routes and + // Configurations type. 
+ informer := elaInformerFactory.Serving().V1alpha1().Routes() + + return &Receiver{ + Base: receiver.NewBase(kubeClientSet, elaClientSet, kubeInformerFactory, + elaInformerFactory, informer.Informer(), controllerAgentName, logger), + lister: informer.Lister(), + synced: informer.Informer().HasSynced, + controllerConfig: controllerConfig, + enableScaleToZero: enableScaleToZero, + } +} diff --git a/pkg/receiver/route/route.go b/pkg/receiver/route/route.go new file mode 100644 index 000000000000..a5beb4131029 --- /dev/null +++ b/pkg/receiver/route/route.go @@ -0,0 +1,708 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package route + +import ( + "context" + "errors" + "fmt" + "reflect" + + corev1 "k8s.io/api/core/v1" + apierrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/knative/serving/pkg/apis/serving" + "github.com/knative/serving/pkg/apis/serving/v1alpha1" + "github.com/knative/serving/pkg/controller" + "github.com/knative/serving/pkg/logging" + "go.uber.org/zap" +) + +// SyncRoute implements route.Receiver +func (c *Receiver) SyncRoute(route *v1alpha1.Route) error { + logger := loggerWithRouteInfo(c.Logger, route.Namespace, route.Name) + ctx := logging.WithLogger(context.TODO(), logger) + + logger.Infof("Reconciling route :%v", route) + + // Create a placeholder service that is simply used by istio as a placeholder. 
+ // This service could eventually be the 'activator' service that will get all the + // fallthrough traffic if there are no route rules (revisions to target). + // This is one way to implement the 0->1. For now, we'll just create a placeholder + // that selects nothing. + logger.Info("Creating/Updating placeholder k8s services") + if err := c.reconcilePlaceholderService(ctx, route); err != nil { + return err + } + + // Call syncTrafficTargetsAndUpdateRouteStatus, which also updates the Route.Status + // to contain the domain we will use for Ingress creation. + if _, err := c.syncTrafficTargetsAndUpdateRouteStatus(ctx, route); err != nil { + return err + } + + // Then create or update the Ingress rule for this service + logger.Info("Creating or updating ingress rule") + if err := c.reconcileIngress(ctx, route); err != nil { + logger.Error("Failed to create or update ingress rule", zap.Error(err)) + return err + } + + logger.Info("Route successfully synced") + return nil +} + +// syncTrafficTargetsAndUpdateRouteStatus attempts to converge the actual state and desired state +// according to the traffic targets in Spec field for Route resource. It then updates the Status +// block of the Route and returns the updated one. 
+func (c *Receiver) syncTrafficTargetsAndUpdateRouteStatus(ctx context.Context, route *v1alpha1.Route) (*v1alpha1.Route, error) { + logger := logging.FromContext(ctx) + c.consolidateTrafficTargets(ctx, route) + configMap, revMap, err := c.getDirectTrafficTargets(ctx, route) + if err != nil { + return nil, err + } + if err := c.extendConfigurationsWithIndirectTrafficTargets(ctx, route, configMap, revMap); err != nil { + return nil, err + } + if err := c.extendRevisionsWithIndirectTrafficTargets(ctx, route, configMap, revMap); err != nil { + return nil, err + } + + if err := c.deleteLabelForOutsideOfGivenConfigurations(ctx, route, configMap); err != nil { + return nil, err + } + if err := c.setLabelForGivenConfigurations(ctx, route, configMap); err != nil { + return nil, err + } + + if err := c.deleteLabelForOutsideOfGivenRevisions(ctx, route, revMap); err != nil { + return nil, err + } + if err := c.setLabelForGivenRevisions(ctx, route, revMap); err != nil { + return nil, err + } + + // Then create the actual route rules. 
+ logger.Info("Creating Istio route rules") + revisionRoutes, err := c.createOrUpdateRouteRules(ctx, route, configMap, revMap) + if err != nil { + logger.Error("Failed to create routes", zap.Error(err)) + return nil, err + } + + // If revision routes were configured, update them + if revisionRoutes != nil { + traffic := []v1alpha1.TrafficTarget{} + for _, r := range revisionRoutes { + traffic = append(traffic, v1alpha1.TrafficTarget{ + Name: r.Name, + RevisionName: r.RevisionName, + Percent: r.Weight, + }) + } + route.Status.Traffic = traffic + } + route.Status.Domain = c.routeDomain(route) + updated, err := c.updateStatus(route) + if err != nil { + logger.Warn("Failed to update service status", zap.Error(err)) + c.Recorder.Eventf(route, corev1.EventTypeWarning, "UpdateFailed", "Failed to update status for route %q: %v", route.Name, err) + return nil, err + } + c.Recorder.Eventf(route, corev1.EventTypeNormal, "Updated", "Updated status for route %q", route.Name) + return updated, nil +} + +func (c *Receiver) reconcilePlaceholderService(ctx context.Context, route *v1alpha1.Route) error { + logger := logging.FromContext(ctx) + service := MakeRouteK8SService(route) + if _, err := c.KubeClientSet.Core().Services(route.Namespace).Create(service); err != nil { + if apierrs.IsAlreadyExists(err) { + // Service already exist. 
+ return nil + } + logger.Error("Failed to create service", zap.Error(err)) + c.Recorder.Eventf(route, corev1.EventTypeWarning, "CreationFailed", "Failed to create service %q: %v", service.Name, err) + return err + } + logger.Infof("Created service %s", service.Name) + c.Recorder.Eventf(route, corev1.EventTypeNormal, "Created", "Created service %q", service.Name) + return nil +} + +func (c *Receiver) reconcileIngress(ctx context.Context, route *v1alpha1.Route) error { + logger := logging.FromContext(ctx) + ingressNamespace := route.Namespace + ingressName := controller.GetElaK8SIngressName(route) + ingress := MakeRouteIngress(route) + ingressClient := c.KubeClientSet.Extensions().Ingresses(ingressNamespace) + existing, err := ingressClient.Get(ingressName, metav1.GetOptions{}) + + if err != nil { + if apierrs.IsNotFound(err) { + if _, err = ingressClient.Create(ingress); err == nil { + logger.Infof("Created ingress %q in namespace %q", ingressName, ingressNamespace) + c.Recorder.Eventf(route, corev1.EventTypeNormal, "Created", "Created Ingress %q in namespace %q", ingressName, ingressNamespace) + } + } + return err + } + // Check if there is anything to update. 
+ if !reflect.DeepEqual(existing.Spec, ingress.Spec) { + existing.Spec = ingress.Spec + if _, err = ingressClient.Update(existing); err == nil { + logger.Infof("Updated ingress %q in namespace %q", ingressName, ingressNamespace) + c.Recorder.Eventf(route, corev1.EventTypeNormal, "Updated", "Updated Ingress %q in namespace %q", ingressName, ingressNamespace) + } + return err + } + return nil +} + +func (c *Receiver) getDirectTrafficTargets(ctx context.Context, route *v1alpha1.Route) ( + map[string]*v1alpha1.Configuration, map[string]*v1alpha1.Revision, error) { + logger := logging.FromContext(ctx) + ns := route.Namespace + configClient := c.ElaClientSet.ServingV1alpha1().Configurations(ns) + revClient := c.ElaClientSet.ServingV1alpha1().Revisions(ns) + configMap := map[string]*v1alpha1.Configuration{} + revMap := map[string]*v1alpha1.Revision{} + + for _, tt := range route.Spec.Traffic { + if tt.ConfigurationName != "" { + configName := tt.ConfigurationName + config, err := configClient.Get(configName, metav1.GetOptions{}) + if err != nil { + logger.Infof("Failed to fetch Configuration %q: %v", configName, err) + return nil, nil, err + } + configMap[configName] = config + } else { + revName := tt.RevisionName + rev, err := revClient.Get(revName, metav1.GetOptions{}) + if err != nil { + logger.Infof("Failed to fetch Revision %q: %v", revName, err) + return nil, nil, err + } + revMap[revName] = rev + } + } + + return configMap, revMap, nil +} + +func (c *Receiver) extendConfigurationsWithIndirectTrafficTargets( + ctx context.Context, route *v1alpha1.Route, configMap map[string]*v1alpha1.Configuration, + revMap map[string]*v1alpha1.Revision) error { + logger := logging.FromContext(ctx) + ns := route.Namespace + configClient := c.ElaClientSet.ServingV1alpha1().Configurations(ns) + + // Get indirect configurations. 
+	for _, rev := range revMap {
+		if configName, ok := rev.Labels[serving.ConfigurationLabelKey]; ok {
+			if _, ok := configMap[configName]; !ok {
+				// This is not a duplicated configuration
+				config, err := configClient.Get(configName, metav1.GetOptions{})
+				if err != nil {
+					logger.Errorf("Failed to fetch Configuration %s: %s", configName, err)
+					return err
+				}
+				configMap[configName] = config
+			}
+		} else {
+			logger.Infof("Revision %s does not have label %s", rev.Name, serving.ConfigurationLabelKey)
+		}
+	}
+
+	return nil
+}
+
+func (c *Receiver) extendRevisionsWithIndirectTrafficTargets(
+	ctx context.Context, route *v1alpha1.Route, configMap map[string]*v1alpha1.Configuration,
+	revMap map[string]*v1alpha1.Revision) error {
+	logger := logging.FromContext(ctx)
+	ns := route.Namespace
+	revisionClient := c.ElaClientSet.ServingV1alpha1().Revisions(ns)
+
+	for _, tt := range route.Spec.Traffic {
+		if tt.ConfigurationName != "" {
+			configName := tt.ConfigurationName
+			if config, ok := configMap[configName]; ok {
+
+				revName := config.Status.LatestReadyRevisionName
+				if revName == "" {
+					logger.Infof("Configuration %q is not ready; skipping it during route reconcile",
+						tt.ConfigurationName)
+					continue
+				}
+				// Check if it is a duplicated revision
+				if _, ok := revMap[revName]; !ok {
+					rev, err := revisionClient.Get(revName, metav1.GetOptions{})
+					if err != nil {
+						logger.Errorf("Failed to fetch Revision %s: %s", revName, err)
+						return err
+					}
+					revMap[revName] = rev
+				}
+			}
+		}
+	}
+
+	return nil
+}
+
+func (c *Receiver) setLabelForGivenConfigurations(
+	ctx context.Context, route *v1alpha1.Route, configMap map[string]*v1alpha1.Configuration) error {
+	logger := logging.FromContext(ctx)
+	configClient := c.ElaClientSet.ServingV1alpha1().Configurations(route.Namespace)
+
+	// Validate
+	for _, config := range configMap {
+		if routeName, ok := config.Labels[serving.RouteLabelKey]; ok {
+			// TODO(yanweiguo): add a condition in status for this error
+			if routeName != route.Name {
+				errMsg := fmt.Sprintf("Configuration %q is already in use by %q, and cannot be used by %q",
+					config.Name, routeName, route.Name)
+				c.Recorder.Event(route, corev1.EventTypeWarning, "ConfigurationInUse", errMsg)
+				logger.Error(errMsg)
+				return errors.New(errMsg)
+			}
+		}
+	}
+
+	// Set label for newly added configurations as traffic target.
+ for _, config := range configMap { + if config.Labels == nil { + config.Labels = make(map[string]string) + } else if _, ok := config.Labels[serving.RouteLabelKey]; ok { + continue + } + config.Labels[serving.RouteLabelKey] = route.Name + if _, err := configClient.Update(config); err != nil { + logger.Errorf("Failed to update Configuration %s: %s", config.Name, err) + return err + } + } + + return nil +} + +func (c *Receiver) setLabelForGivenRevisions( + ctx context.Context, route *v1alpha1.Route, revMap map[string]*v1alpha1.Revision) error { + logger := logging.FromContext(ctx) + revisionClient := c.ElaClientSet.ServingV1alpha1().Revisions(route.Namespace) + + // Validate revision if it already has a route label + for _, rev := range revMap { + if routeName, ok := rev.Labels[serving.RouteLabelKey]; ok { + if routeName != route.Name { + errMsg := fmt.Sprintf("Revision %q is already in use by %q, and cannot be used by %q", + rev.Name, routeName, route.Name) + c.Recorder.Event(route, corev1.EventTypeWarning, "RevisionInUse", errMsg) + logger.Error(errMsg) + return errors.New(errMsg) + } + } + } + + for _, rev := range revMap { + if rev.Labels == nil { + rev.Labels = make(map[string]string) + } else if _, ok := rev.Labels[serving.RouteLabelKey]; ok { + continue + } + rev.Labels[serving.RouteLabelKey] = route.Name + if _, err := revisionClient.Update(rev); err != nil { + logger.Errorf("Failed to add route label to Revision %s: %s", rev.Name, err) + return err + } + } + + return nil +} + +func (c *Receiver) deleteLabelForOutsideOfGivenConfigurations( + ctx context.Context, route *v1alpha1.Route, configMap map[string]*v1alpha1.Configuration) error { + logger := logging.FromContext(ctx) + configClient := c.ElaClientSet.ServingV1alpha1().Configurations(route.Namespace) + // Get Configurations set as traffic target before this sync. 
+ oldConfigsList, err := configClient.List( + metav1.ListOptions{ + LabelSelector: fmt.Sprintf("%s=%s", serving.RouteLabelKey, route.Name), + }, + ) + if err != nil { + logger.Errorf("Failed to fetch configurations with label '%s=%s': %s", + serving.RouteLabelKey, route.Name, err) + return err + } + + // Delete label for newly removed configurations as traffic target. + for _, config := range oldConfigsList.Items { + if _, ok := configMap[config.Name]; !ok { + delete(config.Labels, serving.RouteLabelKey) + if _, err := configClient.Update(&config); err != nil { + logger.Errorf("Failed to update Configuration %s: %s", config.Name, err) + return err + } + } + } + + return nil +} + +func (c *Receiver) deleteLabelForOutsideOfGivenRevisions( + ctx context.Context, route *v1alpha1.Route, revMap map[string]*v1alpha1.Revision) error { + logger := logging.FromContext(ctx) + revClient := c.ElaClientSet.ServingV1alpha1().Revisions(route.Namespace) + + oldRevList, err := revClient.List( + metav1.ListOptions{ + LabelSelector: fmt.Sprintf("%s=%s", serving.RouteLabelKey, route.Name), + }, + ) + if err != nil { + logger.Errorf("Failed to fetch revisions with label '%s=%s': %s", + serving.RouteLabelKey, route.Name, err) + return err + } + + // Delete label for newly removed revisions as traffic target. + for _, rev := range oldRevList.Items { + if _, ok := revMap[rev.Name]; !ok { + delete(rev.Labels, serving.RouteLabelKey) + if _, err := revClient.Update(&rev); err != nil { + logger.Errorf("Failed to remove route label from Revision %s: %s", rev.Name, err) + return err + } + } + } + + return nil +} + +// computeRevisionRoutes computes RevisionRoute for a route object. If there is one or more inactive revisions and enableScaleToZero +// is true, a route rule with the activator service as the destination will be added. It returns the revision routes, the inactive +// revision name to which the activator should forward requests to, and error if there is any. 
+func (c *Receiver) computeRevisionRoutes(
+	ctx context.Context, route *v1alpha1.Route, configMap map[string]*v1alpha1.Configuration,
+	revMap map[string]*v1alpha1.Revision) ([]RevisionRoute, string, error) {
+
+	logger := logging.FromContext(ctx)
+	logger.Debug("Figuring out routes")
+	enableScaleToZero := c.enableScaleToZero.Get()
+	// The inactive revision name which has the largest traffic weight.
+	inactiveRev := ""
+	// The max percent in all inactive revisions.
+	maxInactivePercent := 0
+	// The total percent of all inactive revisions.
+	totalInactivePercent := 0
+	ns := route.Namespace
+	elaNS := controller.GetElaNamespaceName(ns)
+	ret := []RevisionRoute{}
+
+	for _, tt := range route.Spec.Traffic {
+		var rev *v1alpha1.Revision
+		revName := tt.RevisionName
+		if tt.ConfigurationName != "" {
+			// Get the configuration's LatestReadyRevisionName
+			revName = configMap[tt.ConfigurationName].Status.LatestReadyRevisionName
+			if revName == "" {
+				logger.Errorf("Configuration %s is not ready. Should skip updating route rules",
+					tt.ConfigurationName)
+				return nil, "", nil
+			}
+			// Revision has been already fetched indirectly in extendRevisionsWithIndirectTrafficTargets
+			rev = revMap[revName]
+
+		} else {
+			// Direct revision has already been fetched
+			rev = revMap[revName]
+		}
+		//TODO(grantr): What should happen if revisionName is empty?
+
+		if rev == nil {
+			// For safety, which should never happen. Return a real error so the
+			// caller does not treat a missing Revision as success.
+			err := fmt.Errorf("failed to fetch Revision %q for Route %q", revName, route.Name)
+			logger.Error(err.Error())
+			return nil, "", err
+		}
+
+		hasRouteRule := true
+		cond := rev.Status.GetCondition(v1alpha1.RevisionConditionReady)
+		if enableScaleToZero && cond != nil {
+			// A revision is considered inactive (yet) if it's in
+			// "Inactive" condition or "Activating" condition.
+ if (cond.Reason == "Inactive" && cond.Status == corev1.ConditionFalse) || + (cond.Reason == "Activating" && cond.Status == corev1.ConditionUnknown) { + // Let inactiveRev be the Reserve revision with the largest traffic weight. + if tt.Percent > maxInactivePercent { + maxInactivePercent = tt.Percent + inactiveRev = rev.Name + } + totalInactivePercent += tt.Percent + hasRouteRule = false + } + } + + if hasRouteRule { + rr := RevisionRoute{ + Name: tt.Name, + RevisionName: rev.Name, + Service: rev.Status.ServiceName, + Namespace: elaNS, + Weight: tt.Percent, + } + ret = append(ret, rr) + } + } + + // TODO: The ideal solution is to append different revision name as headers for each inactive revision. + // https://github.com/knative/serving/issues/882 + if totalInactivePercent > 0 { + activatorRoute := RevisionRoute{ + Name: controller.GetElaK8SActivatorServiceName(), + RevisionName: inactiveRev, + Service: controller.GetElaK8SActivatorServiceName(), + Namespace: controller.GetElaK8SActivatorNamespace(), + Weight: totalInactivePercent, + } + ret = append(ret, activatorRoute) + } + return ret, inactiveRev, nil +} + +// computeEmptyRevisionRoutes is a hack to work around https://github.com/istio/istio/issues/5204. +// Here we add empty/dummy route rules for non-target revisions to prepare to switch traffic to +// them in the future. We are tracking this issue in https://github.com/knative/serving/issues/348. +// +// TODO: Even though this fixes the 503s when switching revisions, revisions will have empty route +// rules to them for perpetuity, therefore not ideal. We should remove this hack once +// https://github.com/istio/istio/issues/5204 is fixed, probably in 0.8.1. 
+func (c *Receiver) computeEmptyRevisionRoutes( + ctx context.Context, route *v1alpha1.Route, configMap map[string]*v1alpha1.Configuration, + revMap map[string]*v1alpha1.Revision) ([]RevisionRoute, error) { + logger := logging.FromContext(ctx) + ns := route.Namespace + elaNS := controller.GetElaNamespaceName(ns) + revClient := c.ElaClientSet.ServingV1alpha1().Revisions(ns) + revRoutes := []RevisionRoute{} + for _, tt := range route.Spec.Traffic { + configName := tt.ConfigurationName + if configName != "" { + // Get the configuration's LatestReadyRevisionName + latestReadyRevName := configMap[configName].Status.LatestReadyRevisionName + revs, err := revClient.List(metav1.ListOptions{ + LabelSelector: fmt.Sprintf("%s=%s", serving.ConfigurationLabelKey, configName), + }) + if err != nil { + logger.Errorf("Failed to fetch revisions of Configuration %q: %s", configName, err) + return nil, err + } + for _, rev := range revs.Items { + if _, ok := revMap[rev.Name]; !ok && rev.Name != latestReadyRevName { + // This is a non-target revision. Adding a dummy rule to make Istio happy, + // so that if we switch traffic to them later we won't cause Istio to + // throw spurious 503s. See https://github.com/istio/istio/issues/5204. + revRoutes = append(revRoutes, RevisionRoute{ + RevisionName: rev.Name, + Service: rev.Status.ServiceName, + Namespace: elaNS, + Weight: 0, + }) + } + } + } + } + return revRoutes, nil +} + +func (c *Receiver) createOrUpdateRouteRules(ctx context.Context, route *v1alpha1.Route, + configMap map[string]*v1alpha1.Configuration, revMap map[string]*v1alpha1.Revision) ([]RevisionRoute, error) { + logger := logging.FromContext(ctx) + // grab a client that's specific to RouteRule. 
+ ns := route.Namespace + routeClient := c.ElaClientSet.ConfigV1alpha2().RouteRules(ns) + if routeClient == nil { + logger.Errorf("Failed to create resource client") + return nil, fmt.Errorf("Couldn't get a routeClient") + } + + revisionRoutes, inactiveRev, err := c.computeRevisionRoutes(ctx, route, configMap, revMap) + if err != nil { + logger.Errorf("Failed to get routes for %s : %q", route.Name, err) + return nil, err + } + if len(revisionRoutes) == 0 { + logger.Errorf("No routes were found for the service %q", route.Name) + return nil, nil + } + + // TODO: remove this once https://github.com/istio/istio/issues/5204 is fixed. + emptyRoutes, err := c.computeEmptyRevisionRoutes(ctx, route, configMap, revMap) + if err != nil { + logger.Errorf("Failed to get empty routes for %s : %q", route.Name, err) + return nil, err + } + revisionRoutes = append(revisionRoutes, emptyRoutes...) + // Create route rule for the route domain + routeRuleName := controller.GetRouteRuleName(route, nil) + routeRules, err := routeClient.Get(routeRuleName, metav1.GetOptions{}) + if err != nil { + if !apierrs.IsNotFound(err) { + return nil, err + } + routeRules = MakeIstioRoutes(route, nil, ns, revisionRoutes, c.routeDomain(route), inactiveRev) + if _, err := routeClient.Create(routeRules); err != nil { + c.Recorder.Eventf(route, corev1.EventTypeWarning, "CreationFailed", "Failed to create Istio route rule %q: %s", routeRules.Name, err) + return nil, err + } + c.Recorder.Eventf(route, corev1.EventTypeNormal, "Created", "Created Istio route rule %q", routeRules.Name) + } else { + routeRules.Spec = makeIstioRouteSpec(route, nil, ns, revisionRoutes, c.routeDomain(route), inactiveRev) + if _, err := routeClient.Update(routeRules); err != nil { + c.Recorder.Eventf(route, corev1.EventTypeWarning, "UpdateFailed", "Failed to update Istio route rule %q: %s", routeRules.Name, err) + return nil, err + } + c.Recorder.Eventf(route, corev1.EventTypeNormal, "Updated", "Updated Istio route rule %q", 
routeRules.Name) + } + + // Create route rule for named traffic targets + for _, tt := range route.Spec.Traffic { + if tt.Name == "" { + continue + } + routeRuleName := controller.GetRouteRuleName(route, &tt) + routeRules, err := routeClient.Get(routeRuleName, metav1.GetOptions{}) + if err != nil { + if !apierrs.IsNotFound(err) { + return nil, err + } + routeRules = MakeIstioRoutes(route, &tt, ns, revisionRoutes, c.routeDomain(route), inactiveRev) + if _, err := routeClient.Create(routeRules); err != nil { + c.Recorder.Eventf(route, corev1.EventTypeWarning, "CreationFailed", "Failed to create Istio route rule %q: %s", routeRules.Name, err) + return nil, err + } + c.Recorder.Eventf(route, corev1.EventTypeNormal, "Created", "Created Istio route rule %q", routeRules.Name) + } else { + routeRules.Spec = makeIstioRouteSpec(route, &tt, ns, revisionRoutes, c.routeDomain(route), inactiveRev) + if _, err := routeClient.Update(routeRules); err != nil { + return nil, err + } + c.Recorder.Eventf(route, corev1.EventTypeNormal, "Updated", "Updated Istio route rule %q", routeRules.Name) + } + } + if err := c.removeOutdatedRouteRules(ctx, route); err != nil { + return nil, err + } + return revisionRoutes, nil +} + +// consolidateTrafficTargets will consolidate all duplicate revisions +// and configurations. If the traffic target names are unique, the traffic +// targets will not be consolidated. 
+func (c *Receiver) consolidateTrafficTargets(ctx context.Context, route *v1alpha1.Route) { + type trafficTarget struct { + name string + revisionName string + configurationName string + } + + logger := logging.FromContext(ctx) + logger.Infof("Attempting to consolidate traffic targets") + trafficTargets := route.Spec.Traffic + trafficMap := make(map[trafficTarget]int) + var order []trafficTarget + + for _, t := range trafficTargets { + tt := trafficTarget{ + name: t.Name, + revisionName: t.RevisionName, + configurationName: t.ConfigurationName, + } + if trafficMap[tt] != 0 { + logger.Infof( + "Found duplicate traffic targets (name: %s, revision: %s, configuration:%s), consolidating traffic", + tt.name, + tt.revisionName, + tt.configurationName, + ) + trafficMap[tt] += t.Percent + } else { + trafficMap[tt] = t.Percent + // The order to walk the map (Go randomizes otherwise) + order = append(order, tt) + } + } + + consolidatedTraffic := []v1alpha1.TrafficTarget{} + for _, tt := range order { + p := trafficMap[tt] + consolidatedTraffic = append( + consolidatedTraffic, + v1alpha1.TrafficTarget{ + Name: tt.name, + ConfigurationName: tt.configurationName, + RevisionName: tt.revisionName, + Percent: p, + }, + ) + } + route.Spec.Traffic = consolidatedTraffic +} + +func (c *Receiver) removeOutdatedRouteRules(ctx context.Context, u *v1alpha1.Route) error { + logger := logging.FromContext(ctx) + ns := u.Namespace + routeClient := c.ElaClientSet.ConfigV1alpha2().RouteRules(ns) + if routeClient == nil { + logger.Error("Failed to create resource client") + return errors.New("Couldn't get a routeClient") + } + + routeRuleList, err := routeClient.List(metav1.ListOptions{ + LabelSelector: fmt.Sprintf("route=%s", u.Name), + }) + if err != nil { + return err + } + + routeRuleNames := map[string]struct{}{} + routeRuleNames[controller.GetRouteRuleName(u, nil)] = struct{}{} + for _, tt := range u.Spec.Traffic { + if tt.Name == "" { + continue + } + 
routeRuleNames[controller.GetRouteRuleName(u, &tt)] = struct{}{} + } + + for _, r := range routeRuleList.Items { + if _, ok := routeRuleNames[r.Name]; ok { + continue + } + logger.Infof("Deleting outdated route: %s", r.Name) + if err := routeClient.Delete(r.Name, nil); err != nil { + if !apierrs.IsNotFound(err) { + return err + } + } + } + return nil +} diff --git a/pkg/controller/route/route_test.go b/pkg/receiver/route/route_test.go similarity index 93% rename from pkg/controller/route/route_test.go rename to pkg/receiver/route/route_test.go index aa9e2d541fa5..9377fabaf8d6 100644 --- a/pkg/controller/route/route_test.go +++ b/pkg/receiver/route/route_test.go @@ -161,10 +161,10 @@ func getActivatorDestinationWeight(w int) v1alpha2.DestinationWeight { } } -func newTestController(t *testing.T, elaObjects ...runtime.Object) ( +func newTestReceiver(t *testing.T, elaObjects ...runtime.Object) ( kubeClient *fakekubeclientset.Clientset, elaClient *fakeclientset.Clientset, - controller *Controller, + receiver *Receiver, kubeInformer kubeinformers.SharedInformerFactory, elaInformer informers.SharedInformerFactory) { @@ -177,7 +177,7 @@ func newTestController(t *testing.T, elaObjects ...runtime.Object) ( kubeInformer = kubeinformers.NewSharedInformerFactory(kubeClient, 0) elaInformer = informers.NewSharedInformerFactory(elaClient, 0) - controller = NewController( + receiver = New( kubeClient, elaClient, kubeInformer, @@ -195,39 +195,13 @@ func newTestController(t *testing.T, elaObjects ...runtime.Object) ( }, k8sflag.Bool("enable-scale-to-zero", false), testLogger, - ).(*Controller) - - return -} - -func newRunningTestController(t *testing.T, elaObjects ...runtime.Object) ( - kubeClient *fakekubeclientset.Clientset, - elaClient *fakeclientset.Clientset, - controller *Controller, - kubeInformer kubeinformers.SharedInformerFactory, - elaInformer informers.SharedInformerFactory, - stopCh chan struct{}) { - - kubeClient, elaClient, controller, kubeInformer, elaInformer = 
newTestController(t, elaObjects...) - - // Start the informers. This must happen after the call to NewController, - // otherwise there are no informers to be started. - stopCh = make(chan struct{}) - kubeInformer.Start(stopCh) - elaInformer.Start(stopCh) - - // Run the controller. - go func() { - if err := controller.Run(2, stopCh); err != nil { - t.Fatalf("Error running controller: %v", err) - } - }() + ).(*Receiver) return } func TestCreateRouteCreatesStuff(t *testing.T) { - kubeClient, elaClient, controller, _, elaInformer := newTestController(t) + kubeClient, elaClient, receiver, _, elaInformer := newTestReceiver(t) h := NewHooks() // Look for the events. Events are delivered asynchronously so we need to use @@ -254,7 +228,7 @@ func TestCreateRouteCreatesStuff(t *testing.T) { // Since updateRouteEvent looks in the lister, we need to add it to the informer elaInformer.Serving().V1alpha1().Routes().Informer().GetIndexer().Add(route) - controller.updateRouteEvent(KeyOrDie(route)) + receiver.SyncRoute(route) // Look for the placeholder service. expectedServiceName := fmt.Sprintf("%s-service", route.Name) @@ -353,8 +327,8 @@ func TestCreateRouteCreatesStuff(t *testing.T) { // Test the only revision in the route is in Reserve (inactive) serving status. func TestCreateRouteForOneReserveRevision(t *testing.T) { - kubeClient, elaClient, controller, _, elaInformer := newTestController(t) - controller.enableScaleToZero = k8sflag.Bool("enable-scale-to-zero", true) + kubeClient, elaClient, receiver, _, elaInformer := newTestReceiver(t) + receiver.enableScaleToZero = k8sflag.Bool("enable-scale-to-zero", true) h := NewHooks() // Look for the events. 
Events are delivered asynchronously so we need to use @@ -386,7 +360,7 @@ func TestCreateRouteForOneReserveRevision(t *testing.T) { // Since updateRouteEvent looks in the lister, we need to add it to the informer elaInformer.Serving().V1alpha1().Routes().Informer().GetIndexer().Add(route) - controller.updateRouteEvent(KeyOrDie(route)) + receiver.SyncRoute(route) // Look for the route rule with activator as the destination. routerule, err := elaClient.ConfigV1alpha2().RouteRules(testNamespace).Get(fmt.Sprintf("%s-istio", route.Name), metav1.GetOptions{}) @@ -448,10 +422,10 @@ func TestCreateRouteForOneReserveRevision(t *testing.T) { } func TestCreateRouteFromConfigsWithMultipleRevs(t *testing.T) { - _, elaClient, controller, _, elaInformer := newTestController(t) + _, elaClient, receiver, _, elaInformer := newTestReceiver(t) // A configuration and associated revision. Normally the revision would be - // created by the configuration controller. + // created by the configuration receiver. config := getTestConfiguration() latestReadyRev := getTestRevisionForConfig(config) otherRev := &v1alpha1.Revision{ @@ -488,7 +462,7 @@ func TestCreateRouteFromConfigsWithMultipleRevs(t *testing.T) { // Since updateRouteEvent looks in the lister, we need to add it to the informer elaInformer.Serving().V1alpha1().Routes().Informer().GetIndexer().Add(route) - controller.updateRouteEvent(KeyOrDie(route)) + receiver.SyncRoute(route) routerule, err := elaClient.ConfigV1alpha2().RouteRules(testNamespace).Get(fmt.Sprintf("%s-istio", route.Name), metav1.GetOptions{}) if err != nil { @@ -535,13 +509,13 @@ func TestCreateRouteFromConfigsWithMultipleRevs(t *testing.T) { } func TestCreateRouteWithMultipleTargets(t *testing.T) { - _, elaClient, controller, _, elaInformer := newTestController(t) + _, elaClient, receiver, _, elaInformer := newTestReceiver(t) // A standalone revision rev := getTestRevision("test-rev") elaClient.ServingV1alpha1().Revisions(testNamespace).Create(rev) // A 
configuration and associated revision. Normally the revision would be - // created by the configuration controller. + // created by the configuration receiver. config := getTestConfiguration() cfgrev := getTestRevisionForConfig(config) config.Status.LatestReadyRevisionName = cfgrev.Name @@ -567,7 +541,7 @@ func TestCreateRouteWithMultipleTargets(t *testing.T) { // Since updateRouteEvent looks in the lister, we need to add it to the informer elaInformer.Serving().V1alpha1().Routes().Informer().GetIndexer().Add(route) - controller.updateRouteEvent(KeyOrDie(route)) + receiver.SyncRoute(route) routerule, err := elaClient.ConfigV1alpha2().RouteRules(testNamespace).Get(fmt.Sprintf("%s-istio", route.Name), metav1.GetOptions{}) if err != nil { @@ -616,8 +590,8 @@ func TestCreateRouteWithMultipleTargets(t *testing.T) { // Test one out of multiple target revisions is in Reserve serving state. func TestCreateRouteWithOneTargetReserve(t *testing.T) { - _, elaClient, controller, _, elaInformer := newTestController(t) - controller.enableScaleToZero = k8sflag.Bool("enable-scale-to-zero", true) + _, elaClient, receiver, _, elaInformer := newTestReceiver(t) + receiver.enableScaleToZero = k8sflag.Bool("enable-scale-to-zero", true) // A standalone inactive revision rev := getTestRevisionWithCondition("test-rev", @@ -629,7 +603,7 @@ func TestCreateRouteWithOneTargetReserve(t *testing.T) { elaClient.ServingV1alpha1().Revisions(testNamespace).Create(rev) // A configuration and associated revision. Normally the revision would be - // created by the configuration controller. + // created by the configuration receiver. 
config := getTestConfiguration() cfgrev := getTestRevisionForConfig(config) config.Status.LatestReadyRevisionName = cfgrev.Name @@ -655,7 +629,7 @@ func TestCreateRouteWithOneTargetReserve(t *testing.T) { // Since updateRouteEvent looks in the lister, we need to add it to the informer elaInformer.Serving().V1alpha1().Routes().Informer().GetIndexer().Add(route) - controller.updateRouteEvent(KeyOrDie(route)) + receiver.SyncRoute(route) routerule, err := elaClient.ConfigV1alpha2().RouteRules(testNamespace).Get(fmt.Sprintf("%s-istio", route.Name), metav1.GetOptions{}) if err != nil { @@ -701,14 +675,14 @@ func TestCreateRouteWithOneTargetReserve(t *testing.T) { } func TestCreateRouteWithDuplicateTargets(t *testing.T) { - _, elaClient, controller, _, elaInformer := newTestController(t) + _, elaClient, receiver, _, elaInformer := newTestReceiver(t) // A standalone revision rev := getTestRevision("test-rev") elaClient.ServingV1alpha1().Revisions(testNamespace).Create(rev) // A configuration and associated revision. Normally the revision would be - // created by the configuration controller. + // created by the configuration receiver. 
config := getTestConfiguration() cfgrev := getTestRevisionForConfig(config) config.Status.LatestReadyRevisionName = cfgrev.Name @@ -757,7 +731,7 @@ func TestCreateRouteWithDuplicateTargets(t *testing.T) { // Since updateRouteEvent looks in the lister, we need to add it to the informer elaInformer.Serving().V1alpha1().Routes().Informer().GetIndexer().Add(route) - controller.updateRouteEvent(KeyOrDie(route)) + receiver.SyncRoute(route) routerule, err := elaClient.ConfigV1alpha2().RouteRules(testNamespace).Get(fmt.Sprintf("%s-istio", route.Name), metav1.GetOptions{}) if err != nil { @@ -818,13 +792,13 @@ func TestCreateRouteWithDuplicateTargets(t *testing.T) { } func TestCreateRouteWithNamedTargets(t *testing.T) { - _, elaClient, controller, _, elaInformer := newTestController(t) + _, elaClient, receiver, _, elaInformer := newTestReceiver(t) // A standalone revision rev := getTestRevision("test-rev") elaClient.ServingV1alpha1().Revisions(testNamespace).Create(rev) // A configuration and associated revision. Normally the revision would be - // created by the configuration controller. + // created by the configuration receiver. 
config := getTestConfiguration() cfgrev := getTestRevisionForConfig(config) config.Status.LatestReadyRevisionName = cfgrev.Name @@ -854,7 +828,7 @@ func TestCreateRouteWithNamedTargets(t *testing.T) { // Since updateRouteEvent looks in the lister, we need to add it to the informer elaInformer.Serving().V1alpha1().Routes().Informer().GetIndexer().Add(route) - controller.updateRouteEvent(KeyOrDie(route)) + receiver.SyncRoute(route) domain := fmt.Sprintf("%s.%s.%s", route.Name, route.Namespace, defaultDomainSuffix) @@ -963,7 +937,7 @@ func TestCreateRouteWithNamedTargets(t *testing.T) { } func TestCreateRouteDeletesOutdatedRouteRules(t *testing.T) { - _, elaClient, controller, _, _ := newTestController(t) + _, elaClient, receiver, _, _ := newTestReceiver(t) config := getTestConfiguration() rev := getTestRevisionForConfig(config) route := getTestRouteWithTrafficTargets( @@ -1014,7 +988,7 @@ func TestCreateRouteDeletesOutdatedRouteRules(t *testing.T) { } elaClient.ServingV1alpha1().Routes("test").Create(route) - if err := controller.removeOutdatedRouteRules(testCtx, route); err != nil { + if err := receiver.removeOutdatedRouteRules(testCtx, route); err != nil { t.Errorf("Unexpected error occurred removing outdated route rules: %s", err) } @@ -1031,7 +1005,7 @@ func TestCreateRouteDeletesOutdatedRouteRules(t *testing.T) { } func TestSetLabelToConfigurationDirectlyConfigured(t *testing.T) { - _, elaClient, controller, _, elaInformer := newTestController(t) + _, elaClient, receiver, _, elaInformer := newTestReceiver(t) config := getTestConfiguration() rev := getTestRevisionForConfig(config) route := getTestRouteWithTrafficTargets( @@ -1048,7 +1022,7 @@ func TestSetLabelToConfigurationDirectlyConfigured(t *testing.T) { elaClient.ServingV1alpha1().Routes(testNamespace).Create(route) // Since updateRouteEvent looks in the lister, we need to add it to the informer elaInformer.Serving().V1alpha1().Routes().Informer().GetIndexer().Add(route) - 
controller.updateRouteEvent(KeyOrDie(route)) + receiver.SyncRoute(route) config, err := elaClient.ServingV1alpha1().Configurations(testNamespace).Get(config.Name, metav1.GetOptions{}) if err != nil { @@ -1063,7 +1037,7 @@ func TestSetLabelToConfigurationDirectlyConfigured(t *testing.T) { } func TestSetLabelToRevisionDirectlyConfigured(t *testing.T) { - _, elaClient, controller, _, elaInformer := newTestController(t) + _, elaClient, receiver, _, elaInformer := newTestReceiver(t) config := getTestConfiguration() rev := getTestRevisionForConfig(config) route := getTestRouteWithTrafficTargets( @@ -1080,7 +1054,7 @@ func TestSetLabelToRevisionDirectlyConfigured(t *testing.T) { elaClient.ServingV1alpha1().Routes(testNamespace).Create(route) // Since updateRouteEvent looks in the lister, we need to add it to the informer elaInformer.Serving().V1alpha1().Routes().Informer().GetIndexer().Add(route) - controller.updateRouteEvent(KeyOrDie(route)) + receiver.SyncRoute(route) rev, err := elaClient.ServingV1alpha1().Revisions(testNamespace).Get(rev.Name, metav1.GetOptions{}) if err != nil { @@ -1100,7 +1074,7 @@ func TestSetLabelToRevisionDirectlyConfigured(t *testing.T) { config.Status.LatestReadyRevisionName = rev.Name elaClient.ServingV1alpha1().Configurations(testNamespace).Update(config) - controller.updateRouteEvent(KeyOrDie(route)) + receiver.SyncRoute(route) rev, err = elaClient.ServingV1alpha1().Revisions(testNamespace).Get(rev.Name, metav1.GetOptions{}) if err != nil { @@ -1119,7 +1093,7 @@ func TestSetLabelToRevisionDirectlyConfigured(t *testing.T) { } func TestSetLabelToConfigurationAndRevisionIndirectlyConfigured(t *testing.T) { - _, elaClient, controller, _, elaInformer := newTestController(t) + _, elaClient, receiver, _, elaInformer := newTestReceiver(t) config := getTestConfiguration() rev := getTestRevisionForConfig(config) route := getTestRouteWithTrafficTargets( @@ -1136,7 +1110,7 @@ func TestSetLabelToConfigurationAndRevisionIndirectlyConfigured(t *testing.T) 
{ elaClient.ServingV1alpha1().Routes(testNamespace).Create(route) // Since updateRouteEvent looks in the lister, we need to add it to the informer elaInformer.Serving().V1alpha1().Routes().Informer().GetIndexer().Add(route) - controller.updateRouteEvent(KeyOrDie(route)) + receiver.SyncRoute(route) config, err := elaClient.ServingV1alpha1().Configurations(testNamespace).Get(config.Name, metav1.GetOptions{}) if err != nil { @@ -1166,7 +1140,7 @@ func TestSetLabelToConfigurationAndRevisionIndirectlyConfigured(t *testing.T) { } func TestCreateRouteWithInvalidConfigurationShouldReturnError(t *testing.T) { - _, elaClient, controller, _, elaInformer := newTestController(t) + _, elaClient, receiver, _, elaInformer := newTestReceiver(t) config := getTestConfiguration() rev := getTestRevisionForConfig(config) route := getTestRouteWithTrafficTargets( @@ -1204,14 +1178,14 @@ func TestCreateRouteWithInvalidConfigurationShouldReturnError(t *testing.T) { expectedErrMsg := "Configuration \"test-config\" is already in use by \"another-route\", and cannot be used by \"test-route\"" // Should return error. - err := controller.updateRouteEvent(route.Namespace + "/" + route.Name) + err := receiver.SyncRoute(route) if wanted, got := expectedErrMsg, err.Error(); wanted != got { t.Errorf("unexpected error: %q expected: %q", got, wanted) } } func TestSetLabelNotChangeConfigurationAndRevisionLabelIfLabelExists(t *testing.T) { - _, elaClient, controller, _, elaInformer := newTestController(t) + _, elaClient, receiver, _, elaInformer := newTestReceiver(t) config := getTestConfiguration() rev := getTestRevisionForConfig(config) route := getTestRouteWithTrafficTargets( @@ -1252,11 +1226,11 @@ func TestSetLabelNotChangeConfigurationAndRevisionLabelIfLabelExists(t *testing. 
}, ) - controller.updateRouteEvent(route.Namespace + "/" + route.Name) + receiver.SyncRoute(route) } func TestDeleteLabelOfConfigurationAndRevisionWhenUnconfigured(t *testing.T) { - _, elaClient, controller, _, elaInformer := newTestController(t) + _, elaClient, receiver, _, elaInformer := newTestReceiver(t) route := getTestRouteWithTrafficTargets([]v1alpha1.TrafficTarget{}) config := getTestConfiguration() // Set a route label in configuration which is expected to be deleted. @@ -1270,7 +1244,7 @@ func TestDeleteLabelOfConfigurationAndRevisionWhenUnconfigured(t *testing.T) { // Since updateRouteEvent looks in the lister, we need to add it to the informer elaInformer.Serving().V1alpha1().Routes().Informer().GetIndexer().Add(route) elaClient.ServingV1alpha1().Routes(testNamespace).Create(route) - controller.updateRouteEvent(KeyOrDie(route)) + receiver.SyncRoute(route) config, err := elaClient.ServingV1alpha1().Configurations(testNamespace).Get(config.Name, metav1.GetOptions{}) if err != nil { @@ -1300,7 +1274,7 @@ func TestDeleteLabelOfConfigurationAndRevisionWhenUnconfigured(t *testing.T) { } func TestUpdateRouteDomainWhenRouteLabelChanges(t *testing.T) { - kubeClient, elaClient, controller, _, elaInformer := newTestController(t) + kubeClient, elaClient, receiver, _, elaInformer := newTestReceiver(t) route := getTestRouteWithTrafficTargets([]v1alpha1.TrafficTarget{}) routeClient := elaClient.ServingV1alpha1().Routes(route.Namespace) ingressClient := kubeClient.Extensions().Ingresses(route.Namespace) @@ -1308,7 +1282,7 @@ func TestUpdateRouteDomainWhenRouteLabelChanges(t *testing.T) { // Create a route. elaInformer.Serving().V1alpha1().Routes().Informer().GetIndexer().Add(route) routeClient.Create(route) - controller.updateRouteEvent(KeyOrDie(route)) + receiver.SyncRoute(route) // Confirms that by default the route get the defaultDomainSuffix. 
expectations := []struct { @@ -1325,7 +1299,7 @@ func TestUpdateRouteDomainWhenRouteLabelChanges(t *testing.T) { route.ObjectMeta.Labels = expectation.labels elaInformer.Serving().V1alpha1().Routes().Informer().GetIndexer().Add(route) routeClient.Update(route) - controller.updateRouteEvent(KeyOrDie(route)) + receiver.SyncRoute(route) // Confirms that the new route get the correct domain. route, _ = routeClient.Get(route.Name, metav1.GetOptions{}) @@ -1349,7 +1323,7 @@ func TestUpdateRouteDomainWhenRouteLabelChanges(t *testing.T) { } func TestUpdateRouteWhenConfigurationChanges(t *testing.T) { - _, elaClient, controller, _, elaInformer := newTestController(t) + _, elaClient, receiver, _, elaInformer := newTestReceiver(t) routeClient := elaClient.ServingV1alpha1().Routes(testNamespace) config := getTestConfiguration() @@ -1372,7 +1346,7 @@ func TestUpdateRouteWhenConfigurationChanges(t *testing.T) { elaInformer.Serving().V1alpha1().Configurations().Informer().GetIndexer().Add(config) elaClient.ServingV1alpha1().Revisions(testNamespace).Create(rev) - controller.addConfigurationEvent(config) + receiver.SyncConfiguration(config) route, err := routeClient.Get(route.Name, metav1.GetOptions{}) if err != nil { @@ -1398,7 +1372,7 @@ func TestUpdateRouteWhenConfigurationChanges(t *testing.T) { // Since addConfigurationEvent looks in the lister, we need to add it to the // informer elaInformer.Serving().V1alpha1().Configurations().Informer().GetIndexer().Add(config) - controller.addConfigurationEvent(config) + receiver.SyncConfiguration(config) route, err = routeClient.Get(route.Name, metav1.GetOptions{}) if err != nil { @@ -1424,7 +1398,7 @@ func TestUpdateRouteWhenConfigurationChanges(t *testing.T) { } func TestAddConfigurationEventNotUpdateAnythingIfHasNoLatestReady(t *testing.T) { - _, elaClient, controller, _, elaInformer := newTestController(t) + _, elaClient, receiver, _, elaInformer := newTestReceiver(t) config := getTestConfiguration() rev := 
getTestRevisionForConfig(config) route := getTestRouteWithTrafficTargets( @@ -1461,12 +1435,12 @@ func TestAddConfigurationEventNotUpdateAnythingIfHasNoLatestReady(t *testing.T) }, ) - controller.addConfigurationEvent(config) + receiver.SyncConfiguration(config) } // Test route when we do not use activator, and then use activator. func TestUpdateIngressEventUpdateRouteStatus(t *testing.T) { - kubeClient, elaClient, controller, _, _ := newTestController(t) + kubeClient, elaClient, receiver, _, _ := newTestReceiver(t) route := getTestRouteWithTrafficTargets( []v1alpha1.TrafficTarget{ @@ -1480,11 +1454,11 @@ func TestUpdateIngressEventUpdateRouteStatus(t *testing.T) { routeClient := elaClient.ServingV1alpha1().Routes(route.Namespace) routeClient.Create(route) // Create an ingress owned by this route. - controller.reconcileIngress(testCtx, route) + receiver.reconcileIngress(testCtx, route) // Before ingress has an IP address, route isn't marked as Ready. ingressClient := kubeClient.Extensions().Ingresses(route.Namespace) ingress, _ := ingressClient.Get(ctrl.GetElaK8SIngressName(route), metav1.GetOptions{}) - controller.updateIngressEvent(nil, ingress) + receiver.SyncIngress(ingress) route, _ = routeClient.Get(route.Name, metav1.GetOptions{}) if nil != route.Status.Conditions { t.Errorf("Route Status.Conditions should be nil, saw %v", route.Status.Conditions) @@ -1493,7 +1467,7 @@ func TestUpdateIngressEventUpdateRouteStatus(t *testing.T) { ingress.Status.LoadBalancer.Ingress = []corev1.LoadBalancerIngress{{ IP: "127.0.0.1", }} - controller.updateIngressEvent(nil, ingress) + receiver.SyncIngress(ingress) // Verify now that Route.Status.Conditions is set correctly. 
expectedConditions := []v1alpha1.RouteCondition{{ Type: v1alpha1.RouteConditionReady, diff --git a/pkg/controller/route/ela_service.go b/pkg/receiver/route/service.go similarity index 100% rename from pkg/controller/route/ela_service.go rename to pkg/receiver/route/service.go diff --git a/pkg/receiver/service/helper.go b/pkg/receiver/service/helper.go new file mode 100644 index 000000000000..28106be287c3 --- /dev/null +++ b/pkg/receiver/service/helper.go @@ -0,0 +1,47 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package service + +import ( + "reflect" + + "go.uber.org/zap" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/knative/serving/pkg/apis/serving/v1alpha1" + "github.com/knative/serving/pkg/logging/logkey" +) + +// loggerWithServiceInfo enriches the logs with service name and namespace. +func loggerWithServiceInfo(logger *zap.SugaredLogger, ns string, name string) *zap.SugaredLogger { + return logger.With(zap.String(logkey.Namespace, ns), zap.String(logkey.Service, name)) +} + +func (c *Receiver) updateStatus(service *v1alpha1.Service) (*v1alpha1.Service, error) { + serviceClient := c.ElaClientSet.ServingV1alpha1().Services(service.Namespace) + existing, err := serviceClient.Get(service.Name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + // Check if there is anything to update. 
+ if !reflect.DeepEqual(existing.Status, service.Status) { + existing.Status = service.Status + // TODO: for CRD there's no updatestatus, so use normal update. + return serviceClient.Update(existing) + } + return existing, nil +} diff --git a/pkg/receiver/service/receiver.go b/pkg/receiver/service/receiver.go new file mode 100644 index 000000000000..227a5968067a --- /dev/null +++ b/pkg/receiver/service/receiver.go @@ -0,0 +1,70 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package service + +import ( + "go.uber.org/zap" + kubeinformers "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/cache" + + clientset "github.com/knative/serving/pkg/client/clientset/versioned" + informers "github.com/knative/serving/pkg/client/informers/externalversions" + listers "github.com/knative/serving/pkg/client/listers/serving/v1alpha1" + "github.com/knative/serving/pkg/controller" + "github.com/knative/serving/pkg/controller/service" + "github.com/knative/serving/pkg/receiver" +) + +// Receiver implements the receiver for Service resources. 
+type Receiver struct { + *receiver.Base + + // lister indexes properties about Services + lister listers.ServiceLister + synced cache.InformerSynced +} + +// Receiver implements service.Receiver +var _ service.Receiver = (*Receiver)(nil) + +const ( + controllerAgentName = "service-controller" +) + +// New returns an implementation of several receivers suitable for managing the +// lifecycle of a Service. +func New( + kubeClientSet kubernetes.Interface, + elaClientSet clientset.Interface, + kubeInformerFactory kubeinformers.SharedInformerFactory, + elaInformerFactory informers.SharedInformerFactory, + config *rest.Config, + controllerConfig controller.Config, + logger *zap.SugaredLogger) service.Receiver { + + // obtain references to a shared index informer for the Services. + informer := elaInformerFactory.Serving().V1alpha1().Services() + + return &Receiver{ + Base: receiver.NewBase(kubeClientSet, elaClientSet, kubeInformerFactory, + elaInformerFactory, informer.Informer(), controllerAgentName, logger), + lister: informer.Lister(), + synced: informer.Informer().HasSynced, + } +} diff --git a/pkg/controller/service/ela_resource.go b/pkg/receiver/service/resource.go similarity index 100% rename from pkg/controller/service/ela_resource.go rename to pkg/receiver/service/resource.go diff --git a/pkg/receiver/service/service.go b/pkg/receiver/service/service.go new file mode 100644 index 000000000000..37fc7f076240 --- /dev/null +++ b/pkg/receiver/service/service.go @@ -0,0 +1,107 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package service + +import ( + apierrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/knative/serving/pkg/apis/serving/v1alpha1" +) + +// SyncService implements service.Receiver +func (c *Receiver) SyncService(service *v1alpha1.Service) error { + logger := loggerWithServiceInfo(c.Logger, service.Namespace, service.Name) + + // We added the Generation to avoid fighting the Configuration controller, + // which adds a Generation to avoid fighting the Revision controller. We + // shouldn't need this once k8s 1.10 lands, see: + // https://github.com/kubernetes/kubernetes/issues/58778 + // TODO(#642): Remove this. + if service.Spec.Generation == service.Status.ObservedGeneration { + logger.Infof("Skipping reconcile since already reconciled %d == %d", + service.Spec.Generation, service.Status.ObservedGeneration) + return nil + } + + logger.Infof("Running reconcile Service for %s\n%+v\n", service.Name, service) + + config := MakeServiceConfiguration(service) + if err := c.reconcileConfiguration(config); err != nil { + logger.Errorf("Failed to update Configuration for %q: %v", service.Name, err) + return err + } + + // TODO: If revision is specified, check that the revision is ready before + // switching routes to it. Though route controller might just do the right thing? + + route := MakeServiceRoute(service, config.Name) + if err := c.reconcileRoute(route); err != nil { + logger.Errorf("Failed to update Route for %q: %v", service.Name, err) + return err + } + + // Update the Status of the Service with the latest generation that + // we just reconciled against so we don't keep generating Revisions. + // TODO(#642): Remove this.
+ service.Status.ObservedGeneration = service.Spec.Generation + + logger.Infof("Updating the Service status:\n%+v", service) + + if _, err := c.updateStatus(service); err != nil { + logger.Errorf("Failed to update Service %q: %v", service.Name, err) + return err + } + + return nil +} + +func (c *Receiver) reconcileConfiguration(config *v1alpha1.Configuration) error { + configClient := c.ElaClientSet.ServingV1alpha1().Configurations(config.Namespace) + + existing, err := configClient.Get(config.Name, metav1.GetOptions{}) + if err != nil { + if apierrs.IsNotFound(err) { + _, err := configClient.Create(config) + return err + } + return err + } + // TODO(vaikas): Perhaps only update if there are actual changes. + copy := existing.DeepCopy() + copy.Spec = config.Spec + _, err = configClient.Update(copy) + return err +} + +func (c *Receiver) reconcileRoute(route *v1alpha1.Route) error { + routeClient := c.ElaClientSet.ServingV1alpha1().Routes(route.Namespace) + + existing, err := routeClient.Get(route.Name, metav1.GetOptions{}) + if err != nil { + if apierrs.IsNotFound(err) { + _, err := routeClient.Create(route) + return err + } + return err + } + // TODO(vaikas): Perhaps only update if there are actual changes. 
+ copy := existing.DeepCopy() + copy.Spec = route.Spec + _, err = routeClient.Update(copy) + return err +} diff --git a/pkg/controller/service/service_configuration.go b/pkg/receiver/service/service_configuration.go similarity index 100% rename from pkg/controller/service/service_configuration.go rename to pkg/receiver/service/service_configuration.go diff --git a/pkg/controller/service/service_configuration_test.go b/pkg/receiver/service/service_configuration_test.go similarity index 100% rename from pkg/controller/service/service_configuration_test.go rename to pkg/receiver/service/service_configuration_test.go diff --git a/pkg/controller/service/service_route.go b/pkg/receiver/service/service_route.go similarity index 100% rename from pkg/controller/service/service_route.go rename to pkg/receiver/service/service_route.go diff --git a/pkg/controller/service/service_route_test.go b/pkg/receiver/service/service_route_test.go similarity index 100% rename from pkg/controller/service/service_route_test.go rename to pkg/receiver/service/service_route_test.go