From 99fddec90b5249f5246eb2a1793c5e51d0ec96b7 Mon Sep 17 00:00:00 2001 From: Akash Verenkar Date: Fri, 5 Apr 2019 10:13:12 -0700 Subject: [PATCH 01/26] WIP --- cmd/fanoutsidecar/channelwatcher.go | 59 +++ cmd/fanoutsidecar/main.go | 141 ++++--- pkg/provisioners/channel_util.go | 5 +- .../filesystem/filesystem_watcher.go | 126 ------ .../filesystem/filesystem_watcher_test.go | 379 ------------------ pkg/sidecar/configmap/parse.go | 54 --- pkg/sidecar/configmap/parse_test.go | 213 ---------- pkg/sidecar/configmap/watcher/watcher.go | 49 --- pkg/sidecar/configmap/watcher/watcher_test.go | 125 ------ .../multi_channel_fanout_handler.go | 28 +- 10 files changed, 147 insertions(+), 1032 deletions(-) create mode 100644 cmd/fanoutsidecar/channelwatcher.go delete mode 100644 pkg/sidecar/configmap/filesystem/filesystem_watcher.go delete mode 100644 pkg/sidecar/configmap/filesystem/filesystem_watcher_test.go delete mode 100644 pkg/sidecar/configmap/parse.go delete mode 100644 pkg/sidecar/configmap/parse_test.go delete mode 100644 pkg/sidecar/configmap/watcher/watcher.go delete mode 100644 pkg/sidecar/configmap/watcher/watcher_test.go diff --git a/cmd/fanoutsidecar/channelwatcher.go b/cmd/fanoutsidecar/channelwatcher.go new file mode 100644 index 00000000000..d29884e43d7 --- /dev/null +++ b/cmd/fanoutsidecar/channelwatcher.go @@ -0,0 +1,59 @@ +package main + +import ( + "context" + + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" + "github.com/knative/eventing/pkg/logging" + "go.uber.org/zap" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/source" +) + +type WatchHandlerFunc func(context.Context, client.Client, types.NamespacedName) error + +type reconciler struct { + client client.Client + logger 
*zap.Logger + handler WatchHandlerFunc +} + +func (r *reconciler) Reconcile(req reconcile.Request) (reconcile.Result, error) { + ctx := logging.WithLogger(context.TODO(), r.logger.With(zap.Any("request", req))) + r.logger.Info("New update for channel.") + if err := r.handler(ctx, r.client, req.NamespacedName); err != nil { + r.logger.Error("WatchHandlerFunc returned error", zap.Error(err)) + return reconcile.Result{}, err + } + return reconcile.Result{}, nil +} + +func New(mgr manager.Manager, logger *zap.Logger, watchHandler WatchHandlerFunc) error { + c, err := controller.New("ChannelWatcher", mgr, controller.Options{ + Reconciler: &reconciler{ + client: mgr.GetClient(), + logger: logger, + handler: watchHandler, + }, + }) + if err != nil { + logger.Error("Unable to create controller for channelwatcher.", zap.Error(err)) + return err + } + + // Watch Channels. + err = c.Watch(&source.Kind{ + Type: &v1alpha1.Channel{}, + }, &handler.EnqueueRequestForObject{}) + if err != nil { + logger.Error("Unable to watch Channels.", zap.Error(err), zap.Any("type", &v1alpha1.Channel{})) + return err + } + return nil +} diff --git a/cmd/fanoutsidecar/main.go b/cmd/fanoutsidecar/main.go index 59e8ce8892b..52a71d65b75 100644 --- a/cmd/fanoutsidecar/main.go +++ b/cmd/fanoutsidecar/main.go @@ -25,49 +25,46 @@ import ( "fmt" "log" "net/http" - "strings" "time" - "github.com/knative/eventing/pkg/sidecar/configmap/filesystem" - "github.com/knative/eventing/pkg/sidecar/configmap/watcher" + "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" + "github.com/knative/eventing/pkg/logging" + "github.com/knative/eventing/pkg/sidecar/fanout" + "github.com/knative/eventing/pkg/sidecar/multichannelfanout" "github.com/knative/eventing/pkg/sidecar/swappable" - "github.com/knative/eventing/pkg/utils" - "github.com/knative/pkg/system" "go.uber.org/zap" "go.uber.org/zap/zapcore" - "k8s.io/client-go/kubernetes" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" 
"sigs.k8s.io/controller-runtime/pkg/client/config" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/runtime/signals" -) - -const ( - defaultConfigMapName = "in-memory-channel-dispatcher-config-map" - // The following are the only valid values of the config_map_noticer flag. - cmnfVolume = "volume" - cmnfWatcher = "watcher" + // uncomment this line to debug in GKE from local machine + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" ) var ( readTimeout = 1 * time.Minute writeTimeout = 1 * time.Minute - port int - configMapNoticer string - configMapNamespace string - configMapName string + port int + channelProvisioners listFlags ) -func init() { - flag.IntVar(&port, "sidecar_port", -1, "The port to run the sidecar on.") - flag.StringVar(&configMapNoticer, "config_map_noticer", "", fmt.Sprintf("The system to notice changes to the ConfigMap. Valid values are: %s", configMapNoticerValues())) - flag.StringVar(&configMapNamespace, "config_map_namespace", system.Namespace(), "The namespace of the ConfigMap that is watched for configuration.") - flag.StringVar(&configMapName, "config_map_name", defaultConfigMapName, "The name of the ConfigMap that is watched for configuration.") +type listFlags []string + +func (l *listFlags) String() string { + return "" +} +func (l *listFlags) Set(value string) error { + *l = append(*l, value) + return nil } -func configMapNoticerValues() string { - return strings.Join([]string{cmnfVolume, cmnfWatcher}, ", ") +func init() { + flag.IntVar(&port, "sidecar_port", -1, "The port to run the sidecar on.") + flag.Var(&channelProvisioners, "channel_provisioners", "The provisioner of the channels that will be watched.") } func main() { @@ -84,14 +81,18 @@ func main() { logger.Fatal("--sidecar_port flag must be set") } + if len(channelProvisioners) < 1 { + logger.Fatal("--channel_provisioners must be specified") + } + sh, err := swappable.NewEmptyHandler(logger) if err != nil { logger.Fatal("Unable to create 
swappable.Handler", zap.Error(err)) } - mgr, err := setupConfigMapNoticer(logger, sh.UpdateConfig) + mgr, err := setupChannelWatcher(logger, sh.UpdateConfig) if err != nil { - logger.Fatal("Unable to create configMap noticer.", zap.Error(err)) + logger.Fatal("Unable to create channel watcher.", zap.Error(err)) } s := &http.Server{ @@ -125,57 +126,73 @@ func main() { } } -func setupConfigMapNoticer(logger *zap.Logger, configUpdated swappable.UpdateConfig) (manager.Manager, error) { - mgr, err := manager.New(config.GetConfigOrDie(), manager.Options{}) - if err != nil { - logger.Error("Error starting manager.", zap.Error(err)) - return nil, err - } - - switch configMapNoticer { - case cmnfVolume: - err = setupConfigMapVolume(logger, mgr, configUpdated) - case cmnfWatcher: - err = setupConfigMapWatcher(logger, mgr, configUpdated) - default: - err = fmt.Errorf("need to provide the --config_map_noticer flag (valid values are %s)", configMapNoticerValues()) - } +func setupChannelWatcher(logger *zap.Logger, configUpdated swappable.UpdateConfig) (manager.Manager, error) { + mgr, err := manager.New(config.GetConfigOrDie(), manager.Options{}) // TODO: Add scheme if err != nil { + logger.Error("Error creating new maanger.", zap.Error(err)) return nil, err } + v1alpha1.AddToScheme(mgr.GetScheme()) + New(mgr, logger, updateChannelConfig(configUpdated)) return mgr, nil } -func setupConfigMapVolume(logger *zap.Logger, mgr manager.Manager, configUpdated swappable.UpdateConfig) error { - cmn, err := filesystem.NewConfigMapWatcher(logger, filesystem.ConfigDir, configUpdated) - if err != nil { - logger.Error("Unable to create filesystem.ConifgMapWatcher", zap.Error(err)) - return err +func updateChannelConfig(updateConfig swappable.UpdateConfig) WatchHandlerFunc { + return func(ctx context.Context, c client.Client, chanNamespacedName types.NamespacedName) error { + channels, err := listAllChannels(ctx, c) + if err != nil { + logging.FromContext(ctx).Info("Unable to list channels", 
zap.Error(err)) + return err + } + config := multiChannelFanoutConfig(channels) + return updateConfig(config) } - if err = mgr.Add(cmn); err != nil { - logger.Error("Unable to add the config map watcher", zap.Error(err)) - return err - } - return nil } -func setupConfigMapWatcher(logger *zap.Logger, mgr manager.Manager, configUpdated swappable.UpdateConfig) error { - kc, err := kubernetes.NewForConfig(mgr.GetConfig()) - if err != nil { - return err +func listAllChannels(ctx context.Context, c client.Client) ([]v1alpha1.Channel, error) { + channels := make([]v1alpha1.Channel, 0) + cl := &v1alpha1.ChannelList{} + if err := c.List(ctx, &client.ListOptions{}, cl); err != nil { + return nil, err + } + for _, c := range cl.Items { + if c.Status.IsReady() && shouldWatch(&c) { + channels = append(channels, c) + } } + return channels, nil +} - cmw, err := watcher.NewWatcher(logger, kc, configMapNamespace, configMapName, configUpdated) - if err != nil { - return err +func shouldWatch(ch *v1alpha1.Channel) bool { + if ch.Spec.Provisioner != nil && ch.Spec.Provisioner.Namespace == "" { + for _, v := range channelProvisioners { + if v == ch.Spec.Provisioner.Name { + return true + } + } } + return false +} - if err = mgr.Add(utils.NewBlockingStart(logger, cmw)); err != nil { - logger.Error("Unable to add the config map watcher", zap.Error(err)) - return err +func multiChannelFanoutConfig(channels []v1alpha1.Channel) *multichannelfanout.Config { + cc := make([]multichannelfanout.ChannelConfig, 0) + for _, c := range channels { + channelConfig := multichannelfanout.ChannelConfig{ + Namespace: c.Namespace, + Name: c.Name, + HostName: c.Status.Address.Hostname, + } + if c.Spec.Subscribable != nil { + channelConfig.FanoutConfig = fanout.Config{ + Subscriptions: c.Spec.Subscribable.Subscribers, + } + } + cc = append(cc, channelConfig) + } + return &multichannelfanout.Config{ + ChannelConfigs: cc, } - return nil } // runnableServer is a small wrapper around http.Server so that it 
matches the manager.Runnable diff --git a/pkg/provisioners/channel_util.go b/pkg/provisioners/channel_util.go index a4261fe8a1e..a6a58011042 100644 --- a/pkg/provisioners/channel_util.go +++ b/pkg/provisioners/channel_util.go @@ -73,9 +73,8 @@ func CreateK8sService(ctx context.Context, client runtimeClient.Client, c *event func getK8sService(ctx context.Context, client runtimeClient.Client, c *eventingv1alpha1.Channel) (*corev1.Service, error) { list := &corev1.ServiceList{} opts := &runtimeClient.ListOptions{ - Namespace: c.Namespace, - // TODO After the full release start selecting on new set of labels by using k8sServiceLabels(c) - LabelSelector: labels.SelectorFromSet(k8sOldServiceLabels(c)), + Namespace: c.Namespace, + LabelSelector: labels.SelectorFromSet(k8sServiceLabels(c)), // Set Raw because if we need to get more than one page, then we will put the continue token // into opts.Raw.Continue. Raw: &metav1.ListOptions{}, diff --git a/pkg/sidecar/configmap/filesystem/filesystem_watcher.go b/pkg/sidecar/configmap/filesystem/filesystem_watcher.go deleted file mode 100644 index 12f5042d51e..00000000000 --- a/pkg/sidecar/configmap/filesystem/filesystem_watcher.go +++ /dev/null @@ -1,126 +0,0 @@ -/* -Copyright 2018 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package filesystem - -import ( - "errors" - - "github.com/fsnotify/fsnotify" - sidecarconfigmap "github.com/knative/eventing/pkg/sidecar/configmap" - "github.com/knative/eventing/pkg/sidecar/multichannelfanout" - "github.com/knative/eventing/pkg/sidecar/swappable" - "github.com/knative/pkg/configmap" - "go.uber.org/zap" -) - -const ( - // ConfigDir is the mount path of the configMap volume. - ConfigDir = "/etc/config/fanout_sidecar" -) - -// Monitors an attached ConfigMap volume for updated configuration and calls `configUpdated` when -// the value changes. -type ConfigMapWatcher struct { - logger *zap.Logger - // The directory to read the configMap from. - dir string - // Stop the watcher by closing this channel. - watcherStopCh chan<- bool - - // The function to call when the configuration is updated. - configUpdated swappable.UpdateConfig -} - -// NewConfigMapWatcher creates a new filesystem.ConfigMapWatcher. The caller is responsible for -// calling Start(<-chan), likely via a controller-runtime Manager. -func NewConfigMapWatcher(logger *zap.Logger, dir string, updateConfig swappable.UpdateConfig) (*ConfigMapWatcher, error) { - conf, err := readConfigMap(logger, dir) - if err != nil { - logger.Error("Unable to read configMap", zap.Error(err)) - return nil, err - } - - logger.Info("Read initial configMap", zap.Any("conf", conf)) - - err = updateConfig(conf) - if err != nil { - logger.Error("Unable to use the initial configMap: %v", zap.Error(err)) - return nil, err - } - - cmw := &ConfigMapWatcher{ - logger: logger, - dir: dir, - configUpdated: updateConfig, - } - return cmw, nil -} - -// readConfigMap attempts to read the configMap from the attached volume. 
-func readConfigMap(logger *zap.Logger, dir string) (*multichannelfanout.Config, error) { - cm, err := configmap.Load(dir) - if err != nil { - return nil, err - } - return sidecarconfigmap.NewFanoutConfig(logger, cm) -} - -// updateConfig reads the configMap data and calls `configUpdated` with the updated value. -func (cmw *ConfigMapWatcher) updateConfig() { - conf, err := readConfigMap(cmw.logger, cmw.dir) - if err != nil { - cmw.logger.Error("Unable to read the configMap", zap.Error(err)) - return - } - err = cmw.configUpdated(conf) - if err != nil { - cmw.logger.Error("Unable to update config", zap.Error(err)) - return - } -} - -// Start implements controller runtime's manager.Runnable. -func (cmw *ConfigMapWatcher) Start(stopCh <-chan struct{}) error { - watcher, err := fsnotify.NewWatcher() - if err != nil { - return err - } - - err = watcher.Add(cmw.dir) - if err != nil { - return err - } - - for { - select { - case _, ok := <-watcher.Events: - if !ok { - // Channel closed. - return errors.New("watcher.Events channel closed") - } - cmw.updateConfig() - case e, ok := <-watcher.Errors: - if !ok { - // Channel closed. - return errors.New("watcher.Errors channel closed") - } - cmw.logger.Error("watcher.Errors", zap.Error(e)) - case <-stopCh: - return watcher.Close() - } - } -} diff --git a/pkg/sidecar/configmap/filesystem/filesystem_watcher_test.go b/pkg/sidecar/configmap/filesystem/filesystem_watcher_test.go deleted file mode 100644 index 84a0ac83912..00000000000 --- a/pkg/sidecar/configmap/filesystem/filesystem_watcher_test.go +++ /dev/null @@ -1,379 +0,0 @@ -/* -Copyright 2018 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package filesystem - -import ( - "errors" - "fmt" - "io/ioutil" - "os" - "strings" - "sync" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - eventingduck "github.com/knative/eventing/pkg/apis/duck/v1alpha1" - "github.com/knative/eventing/pkg/sidecar/configmap" - "github.com/knative/eventing/pkg/sidecar/fanout" - "github.com/knative/eventing/pkg/sidecar/multichannelfanout" - "github.com/knative/eventing/pkg/utils" - "go.uber.org/zap" - yaml "gopkg.in/yaml.v2" -) - -func TestReadConfigMap(t *testing.T) { - testCases := []struct { - name string - createDir bool - config string - expected *multichannelfanout.Config - expectedErr bool - }{ - { - name: "dir does not exist", - createDir: false, - }, - { - name: "no data", - createDir: true, - expectedErr: true, - }, - { - name: "invalid YAML", - createDir: true, - config: ` - key: - - value - - different indent level - `, - expectedErr: true, - }, - { - name: "valid YAML -- invalid JSON", - config: "{ nil: Key }", - createDir: true, - expectedErr: true, - }, - { - name: "unknown field", - config: "{ channelConfigs: [ { not: a-defined-field } ] }", - createDir: true, - expectedErr: true, - }, - { - name: "valid", - createDir: true, - config: ` - channelConfigs: - - namespace: default - name: c1 - fanoutConfig: - subscriptions: - - subscriberURI: event-changer.default.svc.` + utils.GetClusterDomainName() + ` - replyURI: message-dumper-bar.default.svc.` + utils.GetClusterDomainName() + ` - - subscriberURI: message-dumper-foo.default.svc.` + utils.GetClusterDomainName() + ` - - replyURI: 
message-dumper-bar.default.svc.` + utils.GetClusterDomainName() + ` - - namespace: default - name: c2 - fanoutConfig: - subscriptions: - - replyURI: message-dumper-foo.default.svc.` + utils.GetClusterDomainName() + ` - - namespace: other - name: c3 - fanoutConfig: - subscriptions: - - replyURI: message-dumper-foo.default.svc.` + utils.GetClusterDomainName(), - expected: &multichannelfanout.Config{ - ChannelConfigs: []multichannelfanout.ChannelConfig{ - { - Namespace: "default", - Name: "c1", - FanoutConfig: fanout.Config{ - Subscriptions: []eventingduck.ChannelSubscriberSpec{ - { - SubscriberURI: "event-changer.default.svc." + utils.GetClusterDomainName(), - ReplyURI: "message-dumper-bar.default.svc." + utils.GetClusterDomainName(), - }, - { - SubscriberURI: "message-dumper-foo.default.svc." + utils.GetClusterDomainName(), - }, - { - ReplyURI: "message-dumper-bar.default.svc." + utils.GetClusterDomainName(), - }, - }, - }, - }, - { - Namespace: "default", - Name: "c2", - FanoutConfig: fanout.Config{ - Subscriptions: []eventingduck.ChannelSubscriberSpec{ - { - ReplyURI: "message-dumper-foo.default.svc." + utils.GetClusterDomainName(), - }, - }, - }, - }, - { - Namespace: "other", - Name: "c3", - FanoutConfig: fanout.Config{ - Subscriptions: []eventingduck.ChannelSubscriberSpec{ - { - ReplyURI: "message-dumper-foo.default.svc." + utils.GetClusterDomainName(), - }, - }, - }, - }, - }, - }, - }, - } - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - var dir string - if tc.createDir { - var cleanup func() - dir, cleanup = createTempDir(t) - defer cleanup() - } else { - dir = "/tmp/doesNotExist" - } - writeConfigString(t, dir, tc.config) - c, e := readConfigMap(zap.NewNop(), dir) - if tc.expectedErr { - if e == nil { - t.Errorf("Expected an error, actual nil") - } - return - } - if !cmp.Equal(c, tc.expected) { - t.Errorf("Unexpected config. Expected '%v'. 
Actual '%v'.", tc.expected, c) - } - }) - } -} - -func TestWatch(t *testing.T) { - testCases := map[string]struct { - initialConfigErr error - initialConfig *multichannelfanout.Config - updateConfigErr error - updateConfig *multichannelfanout.Config - }{ - "error applying initial config": { - initialConfig: &multichannelfanout.Config{}, - initialConfigErr: errors.New("test-induced error"), - }, - "read initial config": { - initialConfig: &multichannelfanout.Config{ - ChannelConfigs: []multichannelfanout.ChannelConfig{ - { - Namespace: "default", - Name: "c1", - FanoutConfig: fanout.Config{ - Subscriptions: []eventingduck.ChannelSubscriberSpec{ - { - ReplyURI: "foo.bar", - }, - }, - }, - }, - }, - }, - }, - "error apply updated config": { - initialConfig: &multichannelfanout.Config{ - ChannelConfigs: []multichannelfanout.ChannelConfig{ - { - Namespace: "default", - Name: "c1", - FanoutConfig: fanout.Config{ - Subscriptions: []eventingduck.ChannelSubscriberSpec{ - { - ReplyURI: "foo.bar", - }, - }, - }, - }, - }, - }, - updateConfigErr: errors.New("test-induced error"), - }, - "update config": { - initialConfig: &multichannelfanout.Config{ - ChannelConfigs: []multichannelfanout.ChannelConfig{ - { - Namespace: "default", - Name: "c1", - FanoutConfig: fanout.Config{ - Subscriptions: []eventingduck.ChannelSubscriberSpec{ - { - ReplyURI: "foo.bar", - }, - }, - }, - }, - }, - }, - updateConfig: &multichannelfanout.Config{ - ChannelConfigs: []multichannelfanout.ChannelConfig{ - { - Namespace: "default", - Name: "new-channel", - FanoutConfig: fanout.Config{ - Subscriptions: []eventingduck.ChannelSubscriberSpec{ - { - SubscriberURI: "baz.qux", - }, - }, - }, - }, - }, - }, - }, - } - for n, tc := range testCases { - t.Run(n, func(t *testing.T) { - dir, cleanup := createTempDir(t) - defer cleanup() - writeConfig(t, dir, tc.initialConfig) - - cuc := &configUpdatedChecker{ - updateConfigErr: tc.initialConfigErr, - } - cmw, err := NewConfigMapWatcher(zap.NewNop(), dir, 
cuc.updateConfig) - if err != nil { - if tc.initialConfigErr != err { - t.Errorf("Unexpected error making ConfigMapWatcher. Expected: '%v'. Actual '%v'", tc.initialConfigErr, err) - } - return - } - ac := cuc.getConfig() - if !cmp.Equal(tc.initialConfig, ac) { - t.Errorf("Unexpected initial config. Expected '%v'. Actual '%v'", tc.initialConfig, ac) - } - - stopCh := make(chan struct{}) - go func() { - _ = cmw.Start(stopCh) - }() - defer func() { - close(stopCh) - }() - // Sadly, the test is flaky unless we sleep here, waiting for the file system - // watcher to truly start. - time.Sleep(100 * time.Millisecond) - - if tc.updateConfigErr != nil { - cuc.updateConfigErr = tc.updateConfigErr - } - - expected := tc.initialConfig - if tc.updateConfig != nil { - expected = tc.updateConfig - } - - cuc.updateCalled = make(chan struct{}, 1) - writeConfig(t, dir, expected) - // The watcher is running in another goroutine, give it some time to notice the - // change. - select { - case <-cuc.updateCalled: - break - case <-time.After(5 * time.Second): - t.Errorf("Time out waiting for watcher to notice change.") - } - - ac = cuc.getConfig() - if !cmp.Equal(ac, expected) { - t.Errorf("Unexpected update config. Expected '%v'. 
Actual '%v'", expected, ac) - } - }) - } -} - -type configUpdatedChecker struct { - configLock sync.Mutex - config *multichannelfanout.Config - updateCalled chan struct{} - updateConfigErr error -} - -func (cuc *configUpdatedChecker) updateConfig(config *multichannelfanout.Config) error { - cuc.configLock.Lock() - defer cuc.configLock.Unlock() - cuc.config = config - if cuc.updateCalled != nil { - cuc.updateCalled <- struct{}{} - } - return cuc.updateConfigErr -} - -func (cuc *configUpdatedChecker) getConfig() *multichannelfanout.Config { - cuc.configLock.Lock() - defer cuc.configLock.Unlock() - return cuc.config -} - -func createTempDir(t *testing.T) (string, func()) { - dir, err := ioutil.TempDir("", "configMapHandlerTest") - if err != nil { - t.Errorf("Unable to make temp directory: %v", err) - } - return dir, func() { - _ = os.RemoveAll(dir) - } -} - -func writeConfig(t *testing.T, dir string, config *multichannelfanout.Config) { - if config != nil { - yb, err := yaml.Marshal(config) - if err != nil { - t.Errorf("Unable to marshal the config") - } - writeConfigString(t, dir, string(yb)) - } -} - -func writeConfigString(t *testing.T, dir, config string) { - if config != "" { - // Golang editors tend to replace leading spaces with tabs. YAML is left whitespace - // sensitive, so let's replace the tabs with spaces. - leftSpaceConfig := strings.Replace(config, "\t", " ", -1) - err := atomicWriteFile(t, fmt.Sprintf("%s/%s", dir, configmap.MultiChannelFanoutConfigKey), []byte(leftSpaceConfig), 0700) - if err != nil { - t.Errorf("Problem writing the config file: %v", err) - } - } -} - -func atomicWriteFile(t *testing.T, file string, bytes []byte, perm os.FileMode) error { - // In order to more closely replicate how K8s writes ConfigMaps to the file system, we will - // atomically swap out the file by writing it to a temp directory, then renaming it into the - // directory we are watching. 
- tempDir, cleanup := createTempDir(t) - defer cleanup() - - tempFile := fmt.Sprintf("%s/%s", tempDir, "temp") - err := ioutil.WriteFile(tempFile, bytes, perm) - if err != nil { - return err - } - return os.Rename(tempFile, file) -} diff --git a/pkg/sidecar/configmap/parse.go b/pkg/sidecar/configmap/parse.go deleted file mode 100644 index ba6da64f12c..00000000000 --- a/pkg/sidecar/configmap/parse.go +++ /dev/null @@ -1,54 +0,0 @@ -/* -Copyright 2018 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package configmap - -import ( - "encoding/json" - "fmt" - - "github.com/knative/eventing/pkg/sidecar/multichannelfanout" - "go.uber.org/zap" -) - -const ( - // MultiChannelFanoutConfigKey is the key in the ConfigMap that contains all the configuration - // data. - MultiChannelFanoutConfigKey = "multiChannelFanoutConfig" -) - -// NewFanoutConfig attempts to parse the config map's data into a multichannelfanout.Config. -// orig == NewFanoutConfig(SerializeConfig(orig)) -func NewFanoutConfig(logger *zap.Logger, data map[string]string) (*multichannelfanout.Config, error) { - str, present := data[MultiChannelFanoutConfigKey] - if !present { - logger.Error("Expected key not found", zap.String("key", MultiChannelFanoutConfigKey)) - return nil, fmt.Errorf("expected key not found: %v", MultiChannelFanoutConfigKey) - } - return multichannelfanout.Parse(logger, str) -} - -// SerializeConfig takes in a multichannelfanout.Config and generates the ConfigMap equivalent. 
-// orig == NewFanoutConfig(SerializeConfig(orig)) -func SerializeConfig(config multichannelfanout.Config) (map[string]string, error) { - jb, err := json.Marshal(config) - if err != nil { - return nil, err - } - return map[string]string{ - MultiChannelFanoutConfigKey: string(jb), - }, nil -} diff --git a/pkg/sidecar/configmap/parse_test.go b/pkg/sidecar/configmap/parse_test.go deleted file mode 100644 index cee271ce090..00000000000 --- a/pkg/sidecar/configmap/parse_test.go +++ /dev/null @@ -1,213 +0,0 @@ -/* -Copyright 2018 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package configmap - -import ( - "strings" - "testing" - - "github.com/google/go-cmp/cmp" - eventingduck "github.com/knative/eventing/pkg/apis/duck/v1alpha1" - "github.com/knative/eventing/pkg/sidecar/fanout" - "github.com/knative/eventing/pkg/sidecar/multichannelfanout" - "github.com/knative/eventing/pkg/utils" - "go.uber.org/zap" -) - -func TestNewFanoutConfig(t *testing.T) { - testCases := []struct { - name string - config string - expected *multichannelfanout.Config - expectedErr bool - }{ - { - name: "no data", - expectedErr: true, - }, - { - name: "invalid YAML", - config: ` - key: - - value - - different indent level - `, - expectedErr: true, - }, - { - name: "valid YAML -- invalid JSON", - config: "{ nil: Key }", - expectedErr: true, - }, - { - name: "unknown field", - config: "{ channelConfigs: [ { not: a-defined-field } ] }", - expectedErr: true, - }, - { - name: "valid", - config: ` - channelConfigs: - - namespace: default - name: c1 - fanoutConfig: - subscriptions: - - subscriberURI: event-changer.default.svc.` + utils.GetClusterDomainName() + ` - replyURI: message-dumper-bar.default.svc.` + utils.GetClusterDomainName() + ` - - subscriberURI: message-dumper-foo.default.svc.` + utils.GetClusterDomainName() + ` - - replyURI: message-dumper-bar.default.svc.` + utils.GetClusterDomainName() + ` - - namespace: default - name: c2 - fanoutConfig: - subscriptions: - - replyURI: message-dumper-foo.default.svc.` + utils.GetClusterDomainName() + ` - - namespace: other - name: c3 - fanoutConfig: - subscriptions: - - replyURI: message-dumper-foo.default.svc.` + utils.GetClusterDomainName(), - expected: &multichannelfanout.Config{ - ChannelConfigs: []multichannelfanout.ChannelConfig{ - { - Namespace: "default", - Name: "c1", - FanoutConfig: fanout.Config{ - Subscriptions: []eventingduck.ChannelSubscriberSpec{ - { - SubscriberURI: "event-changer.default.svc." + utils.GetClusterDomainName(), - ReplyURI: "message-dumper-bar.default.svc." 
+ utils.GetClusterDomainName(), - }, - { - SubscriberURI: "message-dumper-foo.default.svc." + utils.GetClusterDomainName(), - }, - { - ReplyURI: "message-dumper-bar.default.svc." + utils.GetClusterDomainName(), - }, - }, - }, - }, - { - Namespace: "default", - Name: "c2", - FanoutConfig: fanout.Config{ - Subscriptions: []eventingduck.ChannelSubscriberSpec{ - { - ReplyURI: "message-dumper-foo.default.svc." + utils.GetClusterDomainName(), - }, - }, - }, - }, - { - Namespace: "other", - Name: "c3", - FanoutConfig: fanout.Config{ - Subscriptions: []eventingduck.ChannelSubscriberSpec{ - { - ReplyURI: "message-dumper-foo.default.svc." + utils.GetClusterDomainName(), - }, - }, - }, - }, - }, - }, - }, - } - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - data := formatData(tc.config) - c, e := NewFanoutConfig(zap.NewNop(), data) - if tc.expectedErr { - if e == nil { - t.Errorf("Expected an error, actual nil") - } - return - } - if !cmp.Equal(c, tc.expected) { - t.Errorf("Unexpected config. Expected '%v'. 
Actual '%v'.", tc.expected, c) - } - }) - } -} - -func TestSerializeConfig(t *testing.T) { - testCases := map[string]struct { - config *multichannelfanout.Config - }{ - "empty config": { - config: &multichannelfanout.Config{}, - }, - "full config": { - config: &multichannelfanout.Config{ - ChannelConfigs: []multichannelfanout.ChannelConfig{ - { - Namespace: "default", - Name: "c1", - FanoutConfig: fanout.Config{ - Subscriptions: []eventingduck.ChannelSubscriberSpec{ - { - SubscriberURI: "foo.example.com", - ReplyURI: "bar.example.com", - }, - { - ReplyURI: "qux.example.com", - }, - { - SubscriberURI: "baz.example.com", - }, - {}, - }, - }, - }, - { - Namespace: "other", - Name: "no-subs", - FanoutConfig: fanout.Config{ - Subscriptions: []eventingduck.ChannelSubscriberSpec{}, - }, - }, - }, - }, - }, - } - - for n, tc := range testCases { - t.Run(n, func(t *testing.T) { - s, err := SerializeConfig(*tc.config) - if err != nil { - t.Errorf("Unexpected error serializing config: %v", err) - } - rt, err := NewFanoutConfig(zap.NewNop(), s) - if err != nil { - t.Errorf("Unexpected error deserializing: %v", err) - } - if diff := cmp.Diff(tc.config, rt); diff != "" { - t.Errorf("Unexpected error roundtripping the config (-want, +got): %v", diff) - } - }) - } -} - -func formatData(config string) map[string]string { - data := make(map[string]string) - if config != "" { - // Golang editors tend to replace leading spaces with tabs. YAML is left whitespace - // sensitive and disallows tabs, so let's replace the tabs with four spaces. 
- leftSpaceConfig := strings.Replace(config, "\t", " ", -1) - data[MultiChannelFanoutConfigKey] = leftSpaceConfig - } - return data -} diff --git a/pkg/sidecar/configmap/watcher/watcher.go b/pkg/sidecar/configmap/watcher/watcher.go deleted file mode 100644 index 01dc5d7af9a..00000000000 --- a/pkg/sidecar/configmap/watcher/watcher.go +++ /dev/null @@ -1,49 +0,0 @@ -/* -Copyright 2018 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package watcher - -import ( - sidecarconfigmap "github.com/knative/eventing/pkg/sidecar/configmap" - "github.com/knative/eventing/pkg/sidecar/swappable" - "github.com/knative/pkg/configmap" - "go.uber.org/zap" - corev1 "k8s.io/api/core/v1" - "k8s.io/client-go/kubernetes" - "sigs.k8s.io/controller-runtime/pkg/manager" -) - -// NewWatcher creates a new InformedWatcher that watches the specified ConfigMap and on any change -// that results in a valid multichannelfanout.Config calls configUpdated. 
-func NewWatcher(logger *zap.Logger, kc kubernetes.Interface, cmNamespace, cmName string, configUpdated swappable.UpdateConfig) (manager.Runnable, error) { - iw := configmap.NewInformedWatcher(kc, cmNamespace) - iw.Watch(cmName, func(cm *corev1.ConfigMap) { - config, err := sidecarconfigmap.NewFanoutConfig(logger, cm.Data) - if err != nil { - logger.Error("Could not parse ConfigMap", zap.Error(err), - zap.Any("configMap.Data", cm.Data)) - return - } - - err = configUpdated(config) - if err != nil { - logger.Error("Unable to update config", zap.Error(err)) - return - } - }) - - return iw, nil -} diff --git a/pkg/sidecar/configmap/watcher/watcher_test.go b/pkg/sidecar/configmap/watcher/watcher_test.go deleted file mode 100644 index 6164c38cd63..00000000000 --- a/pkg/sidecar/configmap/watcher/watcher_test.go +++ /dev/null @@ -1,125 +0,0 @@ -/* -Copyright 2018 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package watcher - -import ( - "errors" - "testing" - - "github.com/google/go-cmp/cmp" - eventingduck "github.com/knative/eventing/pkg/apis/duck/v1alpha1" - sidecarconfigmap "github.com/knative/eventing/pkg/sidecar/configmap" - "github.com/knative/eventing/pkg/sidecar/fanout" - "github.com/knative/eventing/pkg/sidecar/multichannelfanout" - "github.com/knative/pkg/configmap" - "go.uber.org/zap" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - namespace = "test-namespace" - name = "test-name" -) - -func TestReconcile(t *testing.T) { - testCases := map[string]struct { - config map[string]string - updateConfigErr error - expectedConfig *multichannelfanout.Config - }{ - "missing key": { - config: map[string]string{}, - expectedConfig: nil, - }, - "cannot parse cm": { - config: map[string]string{ - sidecarconfigmap.MultiChannelFanoutConfigKey: "invalid config", - }, - expectedConfig: nil, - }, - "configUpdated fails": { - config: map[string]string{ - sidecarconfigmap.MultiChannelFanoutConfigKey: "", - }, - updateConfigErr: errors.New("test-error"), - expectedConfig: &multichannelfanout.Config{}, - }, - "success": { - config: map[string]string{ - sidecarconfigmap.MultiChannelFanoutConfigKey: ` - channelConfigs: - - name: foo - namespace: bar - fanoutConfig: - subscriptions: - - subscriberURI: subscriber - replyURI: reply`, - }, - expectedConfig: &multichannelfanout.Config{ - ChannelConfigs: []multichannelfanout.ChannelConfig{ - { - Name: "foo", - Namespace: "bar", - FanoutConfig: fanout.Config{ - Subscriptions: []eventingduck.ChannelSubscriberSpec{ - { - SubscriberURI: "subscriber", - ReplyURI: "reply", - }, - }, - }, - }, - }, - }, - }, - } - for n, tc := range testCases { - t.Run(n, func(t *testing.T) { - cuc := &configUpdatedChecker{ - updateConfigErr: tc.updateConfigErr, - } - - r, err := NewWatcher(zap.NewNop(), nil, namespace, name, cuc.updateConfig) - if err != nil { - t.Errorf("Error creating watcher: %v", err) - 
} - iw := r.(*configmap.InformedWatcher) - iw.OnChange(&corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: namespace, - Name: name, - }, - Data: tc.config, - }) - - if diff := cmp.Diff(tc.expectedConfig, cuc.config); diff != "" { - t.Errorf("Unexpected config (-want +got): %v", diff) - } - }) - } -} - -type configUpdatedChecker struct { - config *multichannelfanout.Config - updateConfigErr error -} - -func (cuc *configUpdatedChecker) updateConfig(config *multichannelfanout.Config) error { - cuc.config = config - return cuc.updateConfigErr -} diff --git a/pkg/sidecar/multichannelfanout/multi_channel_fanout_handler.go b/pkg/sidecar/multichannelfanout/multi_channel_fanout_handler.go index 8872b7026ac..a2f24cbc6d8 100644 --- a/pkg/sidecar/multichannelfanout/multi_channel_fanout_handler.go +++ b/pkg/sidecar/multichannelfanout/multi_channel_fanout_handler.go @@ -30,7 +30,6 @@ import ( "net/http" "github.com/google/go-cmp/cmp" - "github.com/knative/eventing/pkg/provisioners" "github.com/knative/eventing/pkg/sidecar/fanout" "go.uber.org/zap" ) @@ -43,29 +42,21 @@ type Config struct { // ChannelConfig is the configuration for a single Channel. type ChannelConfig struct { - Namespace string `json:"namespace"` - Name string `json:"name"` + Namespace string `json:"namespace"` + Name string `json:"name"` + HostName string FanoutConfig fanout.Config `json:"fanoutConfig"` } -// MakeChannelKey creates the key used for this Channel in the Handler's handlers map. -func makeChannelKey(namespace, name string) string { - return fmt.Sprintf("%s/%s", namespace, name) -} - // makeChannelKeyFromConfig creates the channel key for a given channelConfig. It is a helper around // MakeChannelKey. func makeChannelKeyFromConfig(config ChannelConfig) string { - return makeChannelKey(config.Namespace, config.Name) + return config.HostName } // getChannelKey extracts the channel key from the given HTTP request. 
-func getChannelKey(r *http.Request) (string, error) { - cr, err := provisioners.ParseChannel(r.Host) - if err != nil { - return "", err - } - return makeChannelKey(cr.Namespace, cr.Name), nil +func getChannelKey(r *http.Request) string { + return r.Host } // Handler is an http.Handler that introspects the incoming request to determine what Channel it is @@ -114,12 +105,7 @@ func (h *Handler) CopyWithNewConfig(conf Config) (*Handler, error) { // ServeHTTP delegates the actual handling of the request to a fanout.Handler, based on the // request's channel key. func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - channelKey, err := getChannelKey(r) - if err != nil { - h.logger.Error("Unable to extract channelKey", zap.Error(err)) - w.WriteHeader(http.StatusInternalServerError) - return - } + channelKey := getChannelKey(r) fh, ok := h.handlers[channelKey] if !ok { h.logger.Error("Unable to find a handler for request", zap.String("channelKey", channelKey)) From c642ceafe38fcf775d3a65a8794b9a3598b79049 Mon Sep 17 00:00:00 2001 From: akashrv Date: Sat, 6 Apr 2019 10:37:40 -0700 Subject: [PATCH 02/26] WIP - In-memory working with E2E tests --- cmd/fanoutsidecar/main.go | 9 +- .../in-memory-channel/in-memory-channel.yaml | 27 +----- .../pkg/controller/channel/reconcile.go | 14 +-- .../channelwatcher}/channelwatcher.go | 2 +- pkg/provisioners/channel_util.go | 10 +- .../inmemory/channel/controller.go | 19 +--- .../inmemory/channel/reconcile.go | 92 ++----------------- pkg/provisioners/inmemory/controller/main.go | 3 + .../v1alpha1/broker/resources/ingress.go | 6 +- pkg/sidecar/swappable/swappable.go | 4 + test/crd.go | 5 +- 11 files changed, 37 insertions(+), 154 deletions(-) rename {cmd/fanoutsidecar => pkg/channelwatcher}/channelwatcher.go (98%) diff --git a/cmd/fanoutsidecar/main.go b/cmd/fanoutsidecar/main.go index 52a71d65b75..9787fdfaecb 100644 --- a/cmd/fanoutsidecar/main.go +++ b/cmd/fanoutsidecar/main.go @@ -28,6 +28,7 @@ import ( "time" 
"github.com/knative/eventing/pkg/apis/eventing/v1alpha1" + "github.com/knative/eventing/pkg/channelwatcher" "github.com/knative/eventing/pkg/logging" "github.com/knative/eventing/pkg/sidecar/fanout" "github.com/knative/eventing/pkg/sidecar/multichannelfanout" @@ -64,7 +65,7 @@ func (l *listFlags) Set(value string) error { func init() { flag.IntVar(&port, "sidecar_port", -1, "The port to run the sidecar on.") - flag.Var(&channelProvisioners, "channel_provisioners", "The provisioner of the channels that will be watched.") + flag.Var(&channelProvisioners, "channel_provisioner", "The provisioner of the channels that will be watched.") } func main() { @@ -127,18 +128,18 @@ func main() { } func setupChannelWatcher(logger *zap.Logger, configUpdated swappable.UpdateConfig) (manager.Manager, error) { - mgr, err := manager.New(config.GetConfigOrDie(), manager.Options{}) // TODO: Add scheme + mgr, err := manager.New(config.GetConfigOrDie(), manager.Options{}) if err != nil { logger.Error("Error creating new maanger.", zap.Error(err)) return nil, err } v1alpha1.AddToScheme(mgr.GetScheme()) - New(mgr, logger, updateChannelConfig(configUpdated)) + channelwatcher.New(mgr, logger, updateChannelConfig(configUpdated)) return mgr, nil } -func updateChannelConfig(updateConfig swappable.UpdateConfig) WatchHandlerFunc { +func updateChannelConfig(updateConfig swappable.UpdateConfig) channelwatcher.WatchHandlerFunc { return func(ctx context.Context, c client.Client, chanNamespacedName types.NamespacedName) error { channels, err := listAllChannels(ctx, c) if err != nil { diff --git a/config/provisioners/in-memory-channel/in-memory-channel.yaml b/config/provisioners/in-memory-channel/in-memory-channel.yaml index e0191da4081..cec26e323e3 100644 --- a/config/provisioners/in-memory-channel/in-memory-channel.yaml +++ b/config/provisioners/in-memory-channel/in-memory-channel.yaml @@ -87,8 +87,6 @@ rules: - "" # Core API Group. 
resources: - configmaps - resourceNames: - - in-memory-channel-dispatcher-config-map verbs: - update - apiGroups: @@ -168,9 +166,10 @@ metadata: name: in-memory-channel-dispatcher rules: - apiGroups: - - "" # Core API group. + - "eventing.knative.dev" resources: - - configmaps + - "channels" + - "channels/status" verbs: - get - list @@ -206,8 +205,6 @@ spec: role: dispatcher template: metadata: - annotations: - sidecar.istio.io/inject: "true" labels: *labels spec: serviceAccountName: in-memory-channel-dispatcher @@ -216,24 +213,10 @@ spec: image: github.com/knative/eventing/cmd/fanoutsidecar args: - --sidecar_port=8080 - - --config_map_noticer=watcher - - --config_map_namespace=knative-eventing - - --config_map_name=in-memory-channel-dispatcher-config-map + - --channel_provisioner=in-memory + - --channel_provisioner=in-memory-channel env: - name: SYSTEM_NAMESPACE valueFrom: fieldRef: fieldPath: metadata.namespace - ---- - -# Create the ConfigMap, because if we don't the dispatcher will flap when it first comes online and -# this can cause the integration tests to fail. - -apiVersion: v1 -kind: ConfigMap -metadata: - name: in-memory-channel-dispatcher-config-map - namespace: knative-eventing -data: - multiChannelFanoutConfig: '{}' diff --git a/contrib/gcppubsub/pkg/controller/channel/reconcile.go b/contrib/gcppubsub/pkg/controller/channel/reconcile.go index b446f11de3c..a1836d83e8a 100644 --- a/contrib/gcppubsub/pkg/controller/channel/reconcile.go +++ b/contrib/gcppubsub/pkg/controller/channel/reconcile.go @@ -121,9 +121,6 @@ func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, err } logging.FromContext(ctx).Info("Reconciling Channel") - // Modify a copy, not the original. 
- c = c.DeepCopy() - ctx = logging.WithLogger(ctx, logging.FromContext(ctx).With(zap.Any("channel", c))) requeue, reconcileErr := r.reconcile(ctx, c) if reconcileErr != nil { @@ -163,9 +160,8 @@ func (r *reconciler) reconcile(ctx context.Context, c *eventingv1alpha1.Channel) // We are syncing four things: // 1. The K8s Service to talk to this Channel. - // 2. The Istio VirtualService to talk to this Channel. - // 3. The GCP PubSub Topic (one for the Channel). - // 4. The GCP PubSub Subscriptions (one for each Subscriber of the Channel). + // 2. The GCP PubSub Topic (one for the Channel). + // 3. The GCP PubSub Subscriptions (one for each Subscriber of the Channel). // First we will plan all the names out for steps 3 and 4 persist them to status.internal. Then, on a // subsequent reconcile, we manipulate all the GCP resources in steps 3 and 4. @@ -237,12 +233,6 @@ func (r *reconciler) reconcile(ctx context.Context, c *eventingv1alpha1.Channel) return false, err } - err = r.createVirtualService(ctx, c, svc) - if err != nil { - r.recorder.Eventf(c, v1.EventTypeWarning, virtualServiceCreateFailed, "Failed to reconcile Virtual Service for the Channel: %v", err) - return false, err - } - topic, err := r.createTopic(ctx, plannedPCS, gcpCreds) if err != nil { r.recorder.Eventf(c, v1.EventTypeWarning, topicCreateFailed, "Failed to reconcile Topic for the Channel: %v", err) diff --git a/cmd/fanoutsidecar/channelwatcher.go b/pkg/channelwatcher/channelwatcher.go similarity index 98% rename from cmd/fanoutsidecar/channelwatcher.go rename to pkg/channelwatcher/channelwatcher.go index d29884e43d7..1b9f7dcbb2a 100644 --- a/cmd/fanoutsidecar/channelwatcher.go +++ b/pkg/channelwatcher/channelwatcher.go @@ -1,4 +1,4 @@ -package main +package channelwatcher import ( "context" diff --git a/pkg/provisioners/channel_util.go b/pkg/provisioners/channel_util.go index a6a58011042..7df9356cc5e 100644 --- a/pkg/provisioners/channel_util.go +++ b/pkg/provisioners/channel_util.go @@ -248,6 
+248,8 @@ func UpdateChannel(ctx context.Context, client runtimeClient.Client, u *eventing // OwnerReferences on the resource so handleObject can discover the Channel resource that 'owns' it. // As well as being garbage collected when the Channel is deleted. func newK8sService(c *eventingv1alpha1.Channel) *corev1.Service { + // TODO: Need to check if generated name truncates the channel name in case channel name is tool long + // Add annotations return &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ GenerateName: channelServiceName(c.ObjectMeta.Name), @@ -262,12 +264,8 @@ func newK8sService(c *eventingv1alpha1.Channel) *corev1.Service { }, }, Spec: corev1.ServiceSpec{ - Ports: []corev1.ServicePort{ - { - Name: PortName, - Port: PortNumber, - }, - }, + Type: "ExternalName", + ExternalName: names.ServiceHostName(channelDispatcherServiceName(c.Spec.Provisioner.Name), system.Namespace()), }, } } diff --git a/pkg/provisioners/inmemory/channel/controller.go b/pkg/provisioners/inmemory/channel/controller.go index 7ff6128759a..6edf558e411 100644 --- a/pkg/provisioners/inmemory/channel/controller.go +++ b/pkg/provisioners/inmemory/channel/controller.go @@ -19,10 +19,8 @@ package channel import ( eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3" - "github.com/knative/pkg/system" "go.uber.org/zap" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/manager" @@ -33,18 +31,6 @@ const ( // controllerAgentName is the string used by this controller to identify // itself when creating events. controllerAgentName = "in-memory-channel-controller" - - // ConfigMapName is the name of the ConfigMap in the knative-eventing namespace that contains - // the subscription information for all in-memory Channels. 
The Provisioner writes to it and the - // Dispatcher reads from it. - ConfigMapName = "in-memory-channel-dispatcher-config-map" -) - -var ( - defaultConfigMapKey = types.NamespacedName{ - Namespace: system.Namespace(), - Name: ConfigMapName, - } ) // ProvideController returns a Controller that represents the in-memory-channel Provisioner. @@ -52,9 +38,8 @@ func ProvideController(mgr manager.Manager, logger *zap.Logger) (controller.Cont // Setup a new controller to Reconcile Channels that belong to this Cluster Provisioner // (in-memory channels). r := &reconciler{ - configMapKey: defaultConfigMapKey, - recorder: mgr.GetRecorder(controllerAgentName), - logger: logger, + recorder: mgr.GetRecorder(controllerAgentName), + logger: logger, } c, err := controller.New(controllerAgentName, mgr, controller.Options{ Reconciler: r, diff --git a/pkg/provisioners/inmemory/channel/reconcile.go b/pkg/provisioners/inmemory/channel/reconcile.go index 53ed753244e..5db47a21063 100644 --- a/pkg/provisioners/inmemory/channel/reconcile.go +++ b/pkg/provisioners/inmemory/channel/reconcile.go @@ -21,7 +21,6 @@ import ( "go.uber.org/zap" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/record" @@ -32,7 +31,6 @@ import ( util "github.com/knative/eventing/pkg/provisioners" ccpcontroller "github.com/knative/eventing/pkg/provisioners/inmemory/clusterchannelprovisioner" "github.com/knative/eventing/pkg/reconciler/names" - "github.com/knative/eventing/pkg/sidecar/configmap" "github.com/knative/eventing/pkg/sidecar/fanout" "github.com/knative/eventing/pkg/sidecar/multichannelfanout" ) @@ -53,8 +51,6 @@ type reconciler struct { client client.Client recorder record.EventRecorder logger *zap.Logger - - configMapKey client.ObjectKey } // Verify the struct implements reconcile.Reconciler @@ -93,9 +89,6 @@ func (r *reconciler) Reconcile(request reconcile.Request) 
(reconcile.Result, err } logger.Info("Reconciling Channel") - // Modify a copy, not the original. - c = c.DeepCopy() - err = r.reconcile(ctx, c) if err != nil { logger.Info("Error reconciling Channel", zap.Error(err)) @@ -130,16 +123,8 @@ func (r *reconciler) reconcile(ctx context.Context, c *eventingv1alpha1.Channel) // We are syncing three things: // 1. The K8s Service to talk to this Channel. - // 2. The Istio VirtualService to talk to this Channel. // 3. The configuration of all Channel subscriptions. - // We always need to sync the Channel config, so do it first. - if err := r.syncChannelConfig(ctx); err != nil { - logger.Info("Error syncing the Channel config", zap.Error(err)) - r.recorder.Eventf(c, corev1.EventTypeWarning, channelConfigSyncFailed, "Failed to sync Channel config: %v", err) - return err - } - if c.DeletionTimestamp != nil { // K8s garbage collection will delete the K8s service and VirtualService for this channel. // We use a finalizer to ensure the channel config has been synced. @@ -149,89 +134,24 @@ func (r *reconciler) reconcile(ctx context.Context, c *eventingv1alpha1.Channel) util.AddFinalizer(c, finalizerName) + // We use a single dispatcher for both in-memory and in-memory-channel provisioners. 
+ // + originalProvisionerName := c.Spec.Provisioner.Name + c.Spec.Provisioner.Name = defaultProvisionerName svc, err := util.CreateK8sService(ctx, r.client, c) if err != nil { logger.Info("Error creating the Channel's K8s Service", zap.Error(err)) r.recorder.Eventf(c, corev1.EventTypeWarning, k8sServiceCreateFailed, "Failed to reconcile Channel's K8s Service: %v", err) return err } - c.Status.SetAddress(names.ServiceHostName(svc.Name, svc.Namespace)) + c.Spec.Provisioner.Name = originalProvisionerName - if c.Spec.Provisioner.Name == defaultProvisionerName { - _, err = util.CreateVirtualService(ctx, r.client, c, svc) - if err != nil { - logger.Info("Error creating the Virtual Service for the Channel", zap.Error(err)) - r.recorder.Eventf(c, corev1.EventTypeWarning, virtualServiceCreateFailed, "Failed to reconcile Virtual Service for the Channel: %v", err) - return err - } - } else { - // We need to have a single dispatcher that is pointed at by _both_ - // ClusterChannelProvisioners. So fake the channel, by saying that its provisioner is the - // one with the single dispatcher. The faked provisioner is used only to determine the - // dispatcher Service's name. 
- cCopy := c.DeepCopy() - cCopy.Spec.Provisioner.Name = defaultProvisionerName - _, err = util.CreateVirtualService(ctx, r.client, cCopy, svc) - if err != nil { - logger.Info("Error creating the Virtual Service for the Channel", zap.Error(err)) - r.recorder.Eventf(c, corev1.EventTypeWarning, virtualServiceCreateFailed, "Failed to reconcile Virtual Service for the Channel: %v", err) - return err - } - } + c.Status.SetAddress(names.ServiceHostName(svc.Name, svc.Namespace)) c.Status.MarkProvisioned() return nil } -func (r *reconciler) syncChannelConfig(ctx context.Context) error { - channels, err := r.listAllChannels(ctx) - if err != nil { - r.logger.Info("Unable to list channels", zap.Error(err)) - return err - } - config := multiChannelFanoutConfig(channels) - return r.writeConfigMap(ctx, config) -} - -func (r *reconciler) writeConfigMap(ctx context.Context, config *multichannelfanout.Config) error { - logger := r.logger.With(zap.Any("configMap", r.configMapKey)) - - updated, err := configmap.SerializeConfig(*config) - if err != nil { - r.logger.Error("Unable to serialize config", zap.Error(err), zap.Any("config", config)) - return err - } - - cm := &corev1.ConfigMap{} - err = r.client.Get(ctx, r.configMapKey, cm) - if errors.IsNotFound(err) { - cm = r.createNewConfigMap(updated) - err = r.client.Create(ctx, cm) - } - if err != nil { - logger.Info("Unable to get/create ConfigMap", zap.Error(err)) - return err - } - - if equality.Semantic.DeepEqual(cm.Data, updated) { - // Nothing to update. 
- return nil - } - - cm.Data = updated - return r.client.Update(ctx, cm) -} - -func (r *reconciler) createNewConfigMap(data map[string]string) *corev1.ConfigMap { - return &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: r.configMapKey.Namespace, - Name: r.configMapKey.Name, - }, - Data: data, - } -} - func multiChannelFanoutConfig(channels []eventingv1alpha1.Channel) *multichannelfanout.Config { cc := make([]multichannelfanout.ChannelConfig, 0) for _, c := range channels { diff --git a/pkg/provisioners/inmemory/controller/main.go b/pkg/provisioners/inmemory/controller/main.go index d8da2d062b4..99ee64f885e 100644 --- a/pkg/provisioners/inmemory/controller/main.go +++ b/pkg/provisioners/inmemory/controller/main.go @@ -29,6 +29,9 @@ import ( "go.uber.org/zap" "sigs.k8s.io/controller-runtime/pkg/client/config" "sigs.k8s.io/controller-runtime/pkg/manager" + + // uncomment this line to debug in GKE from local machine + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" ) func main() { diff --git a/pkg/reconciler/v1alpha1/broker/resources/ingress.go b/pkg/reconciler/v1alpha1/broker/resources/ingress.go index f83a991a7b1..f4eb40cd85c 100644 --- a/pkg/reconciler/v1alpha1/broker/resources/ingress.go +++ b/pkg/reconciler/v1alpha1/broker/resources/ingress.go @@ -58,9 +58,9 @@ func MakeIngress(args *IngressArgs) *appsv1.Deployment { Template: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Labels: ingressLabels(args.Broker), - Annotations: map[string]string{ - "sidecar.istio.io/inject": "true", - }, + // Annotations: map[string]string{ + // "sidecar.istio.io/inject": "true", + // }, }, Spec: corev1.PodSpec{ ServiceAccountName: args.ServiceAccountName, diff --git a/pkg/sidecar/swappable/swappable.go b/pkg/sidecar/swappable/swappable.go index 70de3edab2c..3cff72630df 100644 --- a/pkg/sidecar/swappable/swappable.go +++ b/pkg/sidecar/swappable/swappable.go @@ -24,6 +24,7 @@ package swappable import ( "errors" + "fmt" "net/http" "sync" "sync/atomic" @@ -102,6 
+103,9 @@ func (h *Handler) UpdateConfig(config *multichannelfanout.Config) error { // ServeHTTP delegates all HTTP requests to the current multichannelfanout.Handler. func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + // TODO: delete this debugging code + fmt.Sprintf("Request: %+v", r) + // Hand work off to the current multi channel fanout handler. h.logger.Debug("ServeHTTP request received") h.getMultiChannelFanoutHandler().ServeHTTP(w, r) diff --git a/test/crd.go b/test/crd.go index e4a000b2c59..139b33de079 100644 --- a/test/crd.go +++ b/test/crd.go @@ -164,9 +164,8 @@ func EventSenderPod(name string, namespace string, sink string, event CloudEvent return &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Annotations: map[string]string{"sidecar.istio.io/inject": "true"}, + Name: name, + Namespace: namespace, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{{ From 2a4faae5e4969868b1d2f63b3dd87992b816c784 Mon Sep 17 00:00:00 2001 From: Akash Verenkar Date: Tue, 9 Apr 2019 10:33:56 -0700 Subject: [PATCH 03/26] WIP - remove istio dependency from in-memroy channel --- cmd/broker/ingress/main.go | 17 + .../in-memory-channel/in-memory-channel.yaml | 19 +- .../pkg/controller/channel/reconcile.go | 14 +- pkg/provisioners/channel_util.go | 39 +- .../inmemory/channel/controller.go | 10 - .../inmemory/channel/reconcile.go | 44 +- .../inmemory/channel/reconcile_test.go | 375 +---------------- .../clusterchannelprovisioner/reconcile.go | 12 +- .../reconcile_test.go | 65 ++- pkg/provisioners/provisioner_util.go | 24 +- .../filesystem/filesystem_watcher.go | 126 ++++++ .../filesystem/filesystem_watcher_test.go | 379 ++++++++++++++++++ pkg/sidecar/configmap/parse.go | 54 +++ pkg/sidecar/configmap/parse_test.go | 213 ++++++++++ pkg/sidecar/configmap/watcher/watcher.go | 49 +++ pkg/sidecar/configmap/watcher/watcher_test.go | 125 ++++++ .../multi_channel_fanout_handler_test.go | 49 +-- 
pkg/sidecar/swappable/swappable_test.go | 24 +- 18 files changed, 1127 insertions(+), 511 deletions(-) create mode 100644 pkg/sidecar/configmap/filesystem/filesystem_watcher.go create mode 100644 pkg/sidecar/configmap/filesystem/filesystem_watcher_test.go create mode 100644 pkg/sidecar/configmap/parse.go create mode 100644 pkg/sidecar/configmap/parse_test.go create mode 100644 pkg/sidecar/configmap/watcher/watcher.go create mode 100644 pkg/sidecar/configmap/watcher/watcher_test.go diff --git a/cmd/broker/ingress/main.go b/cmd/broker/ingress/main.go index ea0094fba73..efafda14aae 100644 --- a/cmd/broker/ingress/main.go +++ b/cmd/broker/ingress/main.go @@ -17,10 +17,12 @@ package main import ( + "bytes" "context" "errors" "flag" "fmt" + "io/ioutil" "log" "net/http" "net/url" @@ -221,6 +223,20 @@ func (h *handler) serveHTTP(ctx context.Context, event cloudevents.Event, resp * } func (h *handler) sendEvent(ctx context.Context, tctx cehttp.TransportContext, event cloudevents.Event) error { + + //url := "http://external-service.knative-eventing.svc.cluster.local" + resp, err1 := http.Post(h.channelURI.String(), "application/json", bytes.NewBuffer([]byte{})) + if err1 != nil { + log.Println("Error:", err1) + } + body, err1 := ioutil.ReadAll(resp.Body) + if err1 != nil { + log.Fatalln(err1) + } + log.Println(fmt.Sprintf("Reponse: %+v", resp)) + log.Println(fmt.Sprintf("ReponseBody from server: %v", string(body))) + + fmt.Println("ChannelURI: ", h.channelURI) sendingCTX := broker.SendingContext(ctx, tctx, h.channelURI) startTS := time.Now() @@ -232,6 +248,7 @@ func (h *handler) sendEvent(ctx context.Context, tctx cehttp.TransportContext, e _, err := h.ceClient.Send(sendingCTX, event) if err != nil { sendingCTX, _ = tag.New(sendingCTX, tag.Insert(TagResult, "error")) + fmt.Println("Error: ", err) } else { sendingCTX, _ = tag.New(sendingCTX, tag.Insert(TagResult, "ok")) } diff --git a/config/provisioners/in-memory-channel/in-memory-channel.yaml 
b/config/provisioners/in-memory-channel/in-memory-channel.yaml index cec26e323e3..3f466ff5e41 100644 --- a/config/provisioners/in-memory-channel/in-memory-channel.yaml +++ b/config/provisioners/in-memory-channel/in-memory-channel.yaml @@ -62,7 +62,6 @@ rules: - apiGroups: - "" # Core API group. resources: - - configmaps - services verbs: - get @@ -83,22 +82,6 @@ rules: - services verbs: - update - - apiGroups: - - "" # Core API Group. - resources: - - configmaps - verbs: - - update - - apiGroups: - - networking.istio.io - resources: - - virtualservices - verbs: - - get - - list - - watch - - create - - update - apiGroups: - "" # Core API Group. resources: @@ -205,6 +188,8 @@ spec: role: dispatcher template: metadata: + annotations: + sidecar.istio.io/inject: "true" labels: *labels spec: serviceAccountName: in-memory-channel-dispatcher diff --git a/contrib/gcppubsub/pkg/controller/channel/reconcile.go b/contrib/gcppubsub/pkg/controller/channel/reconcile.go index a1836d83e8a..b446f11de3c 100644 --- a/contrib/gcppubsub/pkg/controller/channel/reconcile.go +++ b/contrib/gcppubsub/pkg/controller/channel/reconcile.go @@ -121,6 +121,9 @@ func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, err } logging.FromContext(ctx).Info("Reconciling Channel") + // Modify a copy, not the original. + c = c.DeepCopy() + ctx = logging.WithLogger(ctx, logging.FromContext(ctx).With(zap.Any("channel", c))) requeue, reconcileErr := r.reconcile(ctx, c) if reconcileErr != nil { @@ -160,8 +163,9 @@ func (r *reconciler) reconcile(ctx context.Context, c *eventingv1alpha1.Channel) // We are syncing four things: // 1. The K8s Service to talk to this Channel. - // 2. The GCP PubSub Topic (one for the Channel). - // 3. The GCP PubSub Subscriptions (one for each Subscriber of the Channel). + // 2. The Istio VirtualService to talk to this Channel. + // 3. The GCP PubSub Topic (one for the Channel). + // 4. The GCP PubSub Subscriptions (one for each Subscriber of the Channel). 
// First we will plan all the names out for steps 3 and 4 persist them to status.internal. Then, on a // subsequent reconcile, we manipulate all the GCP resources in steps 3 and 4. @@ -233,6 +237,12 @@ func (r *reconciler) reconcile(ctx context.Context, c *eventingv1alpha1.Channel) return false, err } + err = r.createVirtualService(ctx, c, svc) + if err != nil { + r.recorder.Eventf(c, v1.EventTypeWarning, virtualServiceCreateFailed, "Failed to reconcile Virtual Service for the Channel: %v", err) + return false, err + } + topic, err := r.createTopic(ctx, plannedPCS, gcpCreds) if err != nil { r.recorder.Eventf(c, v1.EventTypeWarning, topicCreateFailed, "Failed to reconcile Topic for the Channel: %v", err) diff --git a/pkg/provisioners/channel_util.go b/pkg/provisioners/channel_util.go index 7df9356cc5e..d52581c8116 100644 --- a/pkg/provisioners/channel_util.go +++ b/pkg/provisioners/channel_util.go @@ -63,11 +63,28 @@ func RemoveFinalizer(o metav1.Object, finalizerName string) { o.SetFinalizers(finalizers.List()) } -func CreateK8sService(ctx context.Context, client runtimeClient.Client, c *eventingv1alpha1.Channel) (*corev1.Service, error) { +type k8sServiceOption func(*corev1.Service) error + +// ExternalService is a functional option for CreateK8sService to create a K8s service of type ExternalName +func ExternalService(c *eventingv1alpha1.Channel) k8sServiceOption { + return func(svc *corev1.Service) error { + svc.Spec = corev1.ServiceSpec{ + Type: "ExternalName", + ExternalName: names.ServiceHostName(channelDispatcherServiceName(c.Spec.Provisioner.Name), system.Namespace()), + } + return nil + } +} + +func CreateK8sService(ctx context.Context, client runtimeClient.Client, c *eventingv1alpha1.Channel, opts ...k8sServiceOption) (*corev1.Service, error) { getSvc := func() (*corev1.Service, error) { return getK8sService(ctx, client, c) } - return createK8sService(ctx, client, getSvc, newK8sService(c)) + svc, err := newK8sService(c, opts...) 
+ if err != nil { + return nil, err + } + return createK8sService(ctx, client, getSvc, svc) } func getK8sService(ctx context.Context, client runtimeClient.Client, c *eventingv1alpha1.Channel) (*corev1.Service, error) { @@ -247,10 +264,10 @@ func UpdateChannel(ctx context.Context, client runtimeClient.Client, u *eventing // newK8sService creates a new Service for a Channel resource. It also sets the appropriate // OwnerReferences on the resource so handleObject can discover the Channel resource that 'owns' it. // As well as being garbage collected when the Channel is deleted. -func newK8sService(c *eventingv1alpha1.Channel) *corev1.Service { +func newK8sService(c *eventingv1alpha1.Channel, opts ...k8sServiceOption) (*corev1.Service, error) { // TODO: Need to check if generated name truncates the channel name in case channel name is tool long // Add annotations - return &corev1.Service{ + svc := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ GenerateName: channelServiceName(c.ObjectMeta.Name), Namespace: c.Namespace, @@ -264,10 +281,20 @@ func newK8sService(c *eventingv1alpha1.Channel) *corev1.Service { }, }, Spec: corev1.ServiceSpec{ - Type: "ExternalName", - ExternalName: names.ServiceHostName(channelDispatcherServiceName(c.Spec.Provisioner.Name), system.Namespace()), + Ports: []corev1.ServicePort{ + { + Name: PortName, + Port: PortNumber, + }, + }, }, } + for _, opt := range opts { + if err := opt(svc); err != nil { + return nil, err + } + } + return svc, nil } // k8sOldServiceLabels returns a map with only old eventing channel and provisioner labels diff --git a/pkg/provisioners/inmemory/channel/controller.go b/pkg/provisioners/inmemory/channel/controller.go index 6edf558e411..88f0e96233f 100644 --- a/pkg/provisioners/inmemory/channel/controller.go +++ b/pkg/provisioners/inmemory/channel/controller.go @@ -18,7 +18,6 @@ package channel import ( eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" - istiov1alpha3 
"github.com/knative/pkg/apis/istio/v1alpha3" "go.uber.org/zap" corev1 "k8s.io/api/core/v1" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -67,14 +66,5 @@ func ProvideController(mgr manager.Manager, logger *zap.Logger) (controller.Cont return nil, err } - // Watch the VirtualServices that are owned by Channels. - err = c.Watch(&source.Kind{ - Type: &istiov1alpha3.VirtualService{}, - }, &handler.EnqueueRequestForOwner{OwnerType: &eventingv1alpha1.Channel{}, IsController: true}) - if err != nil { - logger.Error("Unable to watch VirtualServices.", zap.Error(err)) - return nil, err - } - return c, nil } diff --git a/pkg/provisioners/inmemory/channel/reconcile.go b/pkg/provisioners/inmemory/channel/reconcile.go index 5db47a21063..fffbdc34c64 100644 --- a/pkg/provisioners/inmemory/channel/reconcile.go +++ b/pkg/provisioners/inmemory/channel/reconcile.go @@ -31,18 +31,14 @@ import ( util "github.com/knative/eventing/pkg/provisioners" ccpcontroller "github.com/knative/eventing/pkg/provisioners/inmemory/clusterchannelprovisioner" "github.com/knative/eventing/pkg/reconciler/names" - "github.com/knative/eventing/pkg/sidecar/fanout" - "github.com/knative/eventing/pkg/sidecar/multichannelfanout" ) const ( finalizerName = controllerAgentName // Name of the corev1.Events emitted from the reconciliation process - channelReconciled = "ChannelReconciled" - channelUpdateStatusFailed = "ChannelUpdateStatusFailed" - channelConfigSyncFailed = "ChannelConfigSyncFailed" - k8sServiceCreateFailed = "K8sServiceCreateFailed" - virtualServiceCreateFailed = "VirtualServiceCreateFailed" + channelReconciled = "ChannelReconciled" + channelUpdateStatusFailed = "ChannelUpdateStatusFailed" + k8sServiceCreateFailed = "K8sServiceCreateFailed" // TODO after in-memory-channel is retired, asyncProvisionerName should be removed defaultProvisionerName = "in-memory-channel" ) @@ -126,7 +122,7 @@ func (r *reconciler) reconcile(ctx context.Context, c *eventingv1alpha1.Channel) // 3. 
The configuration of all Channel subscriptions. if c.DeletionTimestamp != nil { - // K8s garbage collection will delete the K8s service and VirtualService for this channel. + // K8s garbage collection will delete the K8s service for this channel. // We use a finalizer to ensure the channel config has been synced. util.RemoveFinalizer(c, finalizerName) return nil @@ -134,17 +130,12 @@ func (r *reconciler) reconcile(ctx context.Context, c *eventingv1alpha1.Channel) util.AddFinalizer(c, finalizerName) - // We use a single dispatcher for both in-memory and in-memory-channel provisioners. - // - originalProvisionerName := c.Spec.Provisioner.Name - c.Spec.Provisioner.Name = defaultProvisionerName - svc, err := util.CreateK8sService(ctx, r.client, c) + svc, err := util.CreateK8sService(ctx, r.client, c, util.ExternalService(c)) if err != nil { logger.Info("Error creating the Channel's K8s Service", zap.Error(err)) r.recorder.Eventf(c, corev1.EventTypeWarning, k8sServiceCreateFailed, "Failed to reconcile Channel's K8s Service: %v", err) return err } - c.Spec.Provisioner.Name = originalProvisionerName c.Status.SetAddress(names.ServiceHostName(svc.Name, svc.Namespace)) @@ -152,31 +143,6 @@ func (r *reconciler) reconcile(ctx context.Context, c *eventingv1alpha1.Channel) return nil } -func multiChannelFanoutConfig(channels []eventingv1alpha1.Channel) *multichannelfanout.Config { - cc := make([]multichannelfanout.ChannelConfig, 0) - for _, c := range channels { - channelConfig := multichannelfanout.ChannelConfig{ - Namespace: c.Namespace, - Name: c.Name, - } - if c.Spec.Subscribable != nil { - // TODO After in-memory-channel is retired, this logic must be refactored. 
- asyncHandler := false - if c.Spec.Provisioner.Name != defaultProvisionerName { - asyncHandler = true - } - channelConfig.FanoutConfig = fanout.Config{ - Subscriptions: c.Spec.Subscribable.Subscribers, - AsyncHandler: asyncHandler, - } - } - cc = append(cc, channelConfig) - } - return &multichannelfanout.Config{ - ChannelConfigs: cc, - } -} - func (r *reconciler) listAllChannels(ctx context.Context) ([]eventingv1alpha1.Channel, error) { channels := make([]eventingv1alpha1.Channel, 0) diff --git a/pkg/provisioners/inmemory/channel/reconcile_test.go b/pkg/provisioners/inmemory/channel/reconcile_test.go index 30b9b2ac27b..211f3fc5a03 100644 --- a/pkg/provisioners/inmemory/channel/reconcile_test.go +++ b/pkg/provisioners/inmemory/channel/reconcile_test.go @@ -18,27 +18,25 @@ package channel import ( "context" - "encoding/json" "errors" "fmt" "testing" - "github.com/google/go-cmp/cmp" eventingduck "github.com/knative/eventing/pkg/apis/duck/v1alpha1" eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" util "github.com/knative/eventing/pkg/provisioners" + "github.com/knative/eventing/pkg/reconciler/names" controllertesting "github.com/knative/eventing/pkg/reconciler/testing" - "github.com/knative/eventing/pkg/sidecar/configmap" "github.com/knative/eventing/pkg/sidecar/fanout" "github.com/knative/eventing/pkg/sidecar/multichannelfanout" "github.com/knative/eventing/pkg/utils" istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3" + "github.com/knative/pkg/system" _ "github.com/knative/pkg/system/testing" "go.uber.org/zap" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes/scheme" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -56,8 +54,6 @@ const ( cmName = "test-config-map" testErrorMessage = "test induced error" - - insertedByVerifyConfigMapData = "data inserted by 
verifyConfigMapData so that it can be WantPresent" ) var ( @@ -183,11 +179,9 @@ var ( // map of events to set test cases' expectations easier events = map[string]corev1.Event{ - channelReconciled: {Reason: channelReconciled, Type: corev1.EventTypeNormal}, - channelUpdateStatusFailed: {Reason: channelUpdateStatusFailed, Type: corev1.EventTypeWarning}, - channelConfigSyncFailed: {Reason: channelConfigSyncFailed, Type: corev1.EventTypeWarning}, - k8sServiceCreateFailed: {Reason: k8sServiceCreateFailed, Type: corev1.EventTypeWarning}, - virtualServiceCreateFailed: {Reason: virtualServiceCreateFailed, Type: corev1.EventTypeWarning}, + channelReconciled: {Reason: channelReconciled, Type: corev1.EventTypeNormal}, + channelUpdateStatusFailed: {Reason: channelUpdateStatusFailed, Type: corev1.EventTypeWarning}, + k8sServiceCreateFailed: {Reason: k8sServiceCreateFailed, Type: corev1.EventTypeWarning}, } ) @@ -250,23 +244,6 @@ func TestReconcile(t *testing.T) { makeChannelWithWrongProvisionerName(), }, }, - { - Name: "Channel deleted - Channel config sync fails", - InitialState: []runtime.Object{ - makeDeletingChannel(), - }, - Mocks: controllertesting.Mocks{ - MockLists: errorListingChannels(), - }, - WantPresent: []runtime.Object{ - // Finalizer has not been removed. 
- makeDeletingChannel(), - }, - WantErrMsg: testErrorMessage, - WantEvent: []corev1.Event{ - events[channelConfigSyncFailed], - }, - }, { Name: "Channel deleted - finalizer removed", InitialState: []runtime.Object{ @@ -279,64 +256,10 @@ func TestReconcile(t *testing.T) { events[channelReconciled], }, }, - { - Name: "Channel config sync fails - can't list Channels", - InitialState: []runtime.Object{ - makeChannel(), - }, - Mocks: controllertesting.Mocks{ - MockLists: errorListingChannels(), - }, - WantErrMsg: testErrorMessage, - WantEvent: []corev1.Event{ - events[channelConfigSyncFailed], - }, - }, - { - Name: "Channel config sync fails - can't get ConfigMap", - InitialState: []runtime.Object{ - makeChannel(), - }, - Mocks: controllertesting.Mocks{ - MockGets: errorGettingConfigMap(), - }, - WantErrMsg: testErrorMessage, - WantEvent: []corev1.Event{ - events[channelConfigSyncFailed], - }, - }, - { - Name: "Channel config sync fails - can't create ConfigMap", - InitialState: []runtime.Object{ - makeChannel(), - }, - Mocks: controllertesting.Mocks{ - MockCreates: errorCreatingConfigMap(), - }, - WantErrMsg: testErrorMessage, - WantEvent: []corev1.Event{ - events[channelConfigSyncFailed], - }, - }, - { - Name: "Channel config sync fails - can't update ConfigMap", - InitialState: []runtime.Object{ - makeChannel(), - makeConfigMap(), - }, - Mocks: controllertesting.Mocks{ - MockUpdates: errorUpdatingConfigMap(), - }, - WantErrMsg: testErrorMessage, - WantEvent: []corev1.Event{ - events[channelConfigSyncFailed], - }, - }, { Name: "K8s service get fails", InitialState: []runtime.Object{ makeChannel(), - makeConfigMap(), }, Mocks: controllertesting.Mocks{ MockLists: errorListingK8sService(), @@ -353,7 +276,6 @@ func TestReconcile(t *testing.T) { Name: "K8s service creation fails", InitialState: []runtime.Object{ makeChannel(), - makeConfigMap(), }, Mocks: controllertesting.Mocks{ MockCreates: errorCreatingK8sService(), @@ -367,54 +289,11 @@ func TestReconcile(t *testing.T) 
{ events[k8sServiceCreateFailed], }, }, - { - Name: "Virtual service get fails", - InitialState: []runtime.Object{ - makeChannel(), - makeConfigMap(), - makeK8sService(), - makeVirtualService(), - }, - Mocks: controllertesting.Mocks{ - MockLists: errorListingVirtualService(), - }, - WantPresent: []runtime.Object{ - // TODO: This should have a useful error message saying that the VirtualService - // failed. - makeChannelWithFinalizerAndAddress(), - }, - WantErrMsg: testErrorMessage, - WantEvent: []corev1.Event{ - events[virtualServiceCreateFailed], - }, - }, - { - Name: "Virtual service creation fails", - InitialState: []runtime.Object{ - makeChannel(), - makeConfigMap(), - makeK8sService(), - }, - Mocks: controllertesting.Mocks{ - MockCreates: errorCreatingVirtualService(), - }, - WantPresent: []runtime.Object{ - // TODO: This should have a useful error message saying that the VirtualService - // failed. - makeChannelWithFinalizerAndAddress(), - }, - WantErrMsg: testErrorMessage, - WantEvent: []corev1.Event{ - events[virtualServiceCreateFailed], - }, - }, { Name: "Channel get for update fails", InitialState: []runtime.Object{ makeChannel(), - makeConfigMap(), makeK8sService(), - makeVirtualService(), }, Mocks: controllertesting.Mocks{ MockGets: errorOnSecondChannelGet(), @@ -428,9 +307,7 @@ func TestReconcile(t *testing.T) { Name: "Channel update fails", InitialState: []runtime.Object{ makeChannel(), - makeConfigMap(), makeK8sService(), - makeVirtualService(), }, Mocks: controllertesting.Mocks{ MockUpdates: errorUpdatingChannel(), @@ -443,9 +320,7 @@ func TestReconcile(t *testing.T) { Name: "Channel status update fails", InitialState: []runtime.Object{ makeChannel(), - makeConfigMap(), makeK8sService(), - makeVirtualService(), }, Mocks: controllertesting.Mocks{ MockStatusUpdates: errorUpdatingChannelStatus(), @@ -454,83 +329,14 @@ func TestReconcile(t *testing.T) { WantEvent: []corev1.Event{ events[channelReconciled], events[channelUpdateStatusFailed], }, - }, { - 
Name: "Channel reconcile successful - Channel list follows pagination", - InitialState: []runtime.Object{ - makeChannel(), - makeConfigMap(), - }, - Mocks: controllertesting.Mocks{ - MockLists: (&paginatedChannelsListStruct{channels: channels}).MockLists(), - // This is more accurate to be in WantPresent, but we need to check JSON equality, - // not string equality, so it can't be done in WantPresent. Instead, we verify - // during the update call, swapping out the data and WantPresent with that inserted - // data. - MockUpdates: verifyConfigMapData(channelsConfig), - }, - WantPresent: []runtime.Object{ - makeReadyChannel(), - makeK8sService(), - makeVirtualService(), - makeConfigMapWithVerifyConfigMapData(), - }, - WantEvent: []corev1.Event{ - events[channelReconciled], - }, - }, - { - Name: "Channel reconcile successful - Channel has no subscribers", - InitialState: []runtime.Object{ - makeChannel(), - makeConfigMap(), - }, - Mocks: controllertesting.Mocks{ - MockLists: (&paginatedChannelsListStruct{channels: []eventingv1alpha1.Channel{ - { - ObjectMeta: metav1.ObjectMeta{ - Namespace: "high-consul", - Name: "duarte", - }, - Spec: eventingv1alpha1.ChannelSpec{ - Provisioner: &corev1.ObjectReference{ - Name: ccpName, - }, - }, - }, - }}).MockLists(), - // This is more accurate to be in WantPresent, but we need to check JSON equality, - // not string equality, so it can't be done in WantPresent. Instead, we verify - // during the update call, swapping out the data and WantPresent with that inserted - // data. 
- MockUpdates: verifyConfigMapData(multichannelfanout.Config{ - ChannelConfigs: []multichannelfanout.ChannelConfig{ - { - Namespace: "high-consul", - Name: "duarte", - }, - }, - }), - }, - WantPresent: []runtime.Object{ - makeReadyChannel(), - makeK8sService(), - makeVirtualService(), - makeConfigMapWithVerifyConfigMapData(), - }, - WantEvent: []corev1.Event{ - events[channelReconciled], - }, }, { Name: "Channel reconcile successful - Async channel", - // VirtualService should have channel provisioner name - // defaults to in-memory-channel but the service should match provisioner's service name InitialState: []runtime.Object{ makeChannel("in-memory"), }, Mocks: controllertesting.Mocks{}, WantPresent: []runtime.Object{ - makeVirtualService(), makeK8sService("in-memory"), }, WantEvent: []corev1.Event{ @@ -539,14 +345,11 @@ func TestReconcile(t *testing.T) { }, { Name: "Channel reconcile successful - Non Async channel", - // VirtualService should have channel provisioner name - // defaults to in-memory-channel InitialState: []runtime.Object{ makeChannel(), }, Mocks: controllertesting.Mocks{}, WantPresent: []runtime.Object{ - makeVirtualService(), makeK8sService(), }, WantEvent: []corev1.Event{ @@ -556,17 +359,12 @@ func TestReconcile(t *testing.T) { } for _, tc := range testCases { - configMapKey := types.NamespacedName{ - Namespace: cmNamespace, - Name: cmName, - } c := tc.GetClient() recorder := tc.GetEventRecorder() r := &reconciler{ - client: c, - recorder: recorder, - logger: zap.NewNop(), - configMapKey: configMapKey, + client: c, + recorder: recorder, + logger: zap.NewNop(), } if tc.ReconcileKey == "" { tc.ReconcileKey = fmt.Sprintf("/%s", cName) @@ -656,26 +454,6 @@ func makeDeletingChannelWithoutFinalizer() *eventingv1alpha1.Channel { return c } -func makeConfigMap() *corev1.ConfigMap { - return &corev1.ConfigMap{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "v1", - Kind: "ConfigMap", - }, - ObjectMeta: metav1.ObjectMeta{ - Namespace: cmNamespace, - Name: 
cmName, - }, - } -} - -func makeConfigMapWithVerifyConfigMapData() *corev1.ConfigMap { - cm := makeConfigMap() - cm.Data = map[string]string{} - cm.Data[configmap.MultiChannelFanoutConfigKey] = insertedByVerifyConfigMapData - return cm -} - func makeK8sService(pn ...string) *corev1.Service { return &corev1.Service{ TypeMeta: metav1.TypeMeta{ @@ -703,60 +481,8 @@ func makeK8sService(pn ...string) *corev1.Service { }, }, Spec: corev1.ServiceSpec{ - Ports: []corev1.ServicePort{ - { - Name: util.PortName, - Port: util.PortNumber, - }, - }, - }, - } -} - -func makeVirtualService() *istiov1alpha3.VirtualService { - return &istiov1alpha3.VirtualService{ - TypeMeta: metav1.TypeMeta{ - APIVersion: istiov1alpha3.SchemeGroupVersion.String(), - Kind: "VirtualService", - }, - ObjectMeta: metav1.ObjectMeta{ - GenerateName: fmt.Sprintf("%s-channel-", cName), - Namespace: cNamespace, - Labels: map[string]string{ - util.EventingChannelLabel: cName, - util.OldEventingChannelLabel: cName, - util.EventingProvisionerLabel: ccpName, - util.OldEventingProvisionerLabel: ccpName, - }, - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: eventingv1alpha1.SchemeGroupVersion.String(), - Kind: "Channel", - Name: cName, - UID: cUID, - Controller: &truePointer, - BlockOwnerDeletion: &truePointer, - }, - }, - }, - Spec: istiov1alpha3.VirtualServiceSpec{ - Hosts: []string{ - serviceAddress, - fmt.Sprintf("%s.%s.channels.%s", cName, cNamespace, utils.GetClusterDomainName()), - }, - HTTP: []istiov1alpha3.HTTPRoute{{ - Rewrite: &istiov1alpha3.HTTPRewrite{ - Authority: fmt.Sprintf("%s.%s.channels.%s", cName, cNamespace, utils.GetClusterDomainName()), - }, - Route: []istiov1alpha3.DestinationWeight{{ - Destination: istiov1alpha3.Destination{ - Host: "in-memory-channel-dispatcher.knative-testing.svc." 
+ utils.GetClusterDomainName(), - Port: istiov1alpha3.PortSelector{ - Number: util.PortNumber, - }, - }}, - }}, - }, + ExternalName: names.ServiceHostName(fmt.Sprintf("%s-dispatcher", getProvisionerName(pn)), system.Namespace()), + Type: "ExternalName", }, } } @@ -780,18 +506,6 @@ func errorGettingChannel() []controllertesting.MockGet { }, } } - -func errorGettingConfigMap() []controllertesting.MockGet { - return []controllertesting.MockGet{ - func(_ client.Client, _ context.Context, _ client.ObjectKey, obj runtime.Object) (controllertesting.MockHandled, error) { - if _, ok := obj.(*corev1.ConfigMap); ok { - return controllertesting.Handled, errors.New(testErrorMessage) - } - return controllertesting.Unhandled, nil - }, - } -} - func errorListingK8sService() []controllertesting.MockList { return []controllertesting.MockList{ func(_ client.Client, _ context.Context, _ *client.ListOptions, obj runtime.Object) (controllertesting.MockHandled, error) { @@ -803,17 +517,6 @@ func errorListingK8sService() []controllertesting.MockList { } } -func errorListingVirtualService() []controllertesting.MockList { - return []controllertesting.MockList{ - func(_ client.Client, _ context.Context, _ *client.ListOptions, obj runtime.Object) (controllertesting.MockHandled, error) { - if _, ok := obj.(*istiov1alpha3.VirtualServiceList); ok { - return controllertesting.Handled, errors.New(testErrorMessage) - } - return controllertesting.Unhandled, nil - }, - } -} - func errorListingChannels() []controllertesting.MockList { return []controllertesting.MockList{ func(client.Client, context.Context, *client.ListOptions, runtime.Object) (controllertesting.MockHandled, error) { @@ -822,17 +525,6 @@ func errorListingChannels() []controllertesting.MockList { } } -func errorCreatingConfigMap() []controllertesting.MockCreate { - return []controllertesting.MockCreate{ - func(_ client.Client, _ context.Context, obj runtime.Object) (controllertesting.MockHandled, error) { - if _, ok := 
obj.(*corev1.ConfigMap); ok { - return controllertesting.Handled, errors.New(testErrorMessage) - } - return controllertesting.Unhandled, nil - }, - } -} - func errorCreatingK8sService() []controllertesting.MockCreate { return []controllertesting.MockCreate{ func(_ client.Client, _ context.Context, obj runtime.Object) (controllertesting.MockHandled, error) { @@ -844,17 +536,6 @@ func errorCreatingK8sService() []controllertesting.MockCreate { } } -func errorCreatingVirtualService() []controllertesting.MockCreate { - return []controllertesting.MockCreate{ - func(_ client.Client, _ context.Context, obj runtime.Object) (controllertesting.MockHandled, error) { - if _, ok := obj.(*istiov1alpha3.VirtualService); ok { - return controllertesting.Handled, errors.New(testErrorMessage) - } - return controllertesting.Unhandled, nil - }, - } -} - func errorUpdatingChannel() []controllertesting.MockUpdate { return []controllertesting.MockUpdate{ func(_ client.Client, _ context.Context, obj runtime.Object) (controllertesting.MockHandled, error) { @@ -877,17 +558,6 @@ func errorUpdatingChannelStatus() []controllertesting.MockStatusUpdate { } } -func errorUpdatingConfigMap() []controllertesting.MockUpdate { - return []controllertesting.MockUpdate{ - func(_ client.Client, _ context.Context, obj runtime.Object) (controllertesting.MockHandled, error) { - if _, ok := obj.(*corev1.ConfigMap); ok { - return controllertesting.Handled, errors.New(testErrorMessage) - } - return controllertesting.Unhandled, nil - }, - } -} - type paginatedChannelsListStruct struct { channels []eventingv1alpha1.Channel } @@ -911,28 +581,3 @@ func (p *paginatedChannelsListStruct) MockLists() []controllertesting.MockList { }, } } - -func verifyConfigMapData(expected multichannelfanout.Config) []controllertesting.MockUpdate { - return []controllertesting.MockUpdate{ - func(innerClient client.Client, ctx context.Context, obj runtime.Object) (controllertesting.MockHandled, error) { - if cm, ok := 
obj.(*corev1.ConfigMap); ok { - s := cm.Data[configmap.MultiChannelFanoutConfigKey] - c := multichannelfanout.Config{} - err := json.Unmarshal([]byte(s), &c) - if err != nil { - return controllertesting.Handled, - fmt.Errorf("test is unable to unmarshal ConfigMap data: %v", err) - } - if diff := cmp.Diff(c, expected); diff != "" { - return controllertesting.Handled, - fmt.Errorf("test got unwanted ChannelsConfig (-want +got) %s", diff) - } - // Verified it is correct, now so that we can verify this actually occurred, swap - // out the data with a known value for later comparison. - cm.Data[configmap.MultiChannelFanoutConfigKey] = insertedByVerifyConfigMapData - return controllertesting.Handled, innerClient.Update(ctx, obj) - } - return controllertesting.Unhandled, nil - }, - } -} diff --git a/pkg/provisioners/inmemory/clusterchannelprovisioner/reconcile.go b/pkg/provisioners/inmemory/clusterchannelprovisioner/reconcile.go index 5e79fc3c802..5794b2dbafc 100644 --- a/pkg/provisioners/inmemory/clusterchannelprovisioner/reconcile.go +++ b/pkg/provisioners/inmemory/clusterchannelprovisioner/reconcile.go @@ -22,6 +22,7 @@ import ( "go.uber.org/zap" corev1 "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -153,7 +154,7 @@ func (r *reconciler) reconcile(ctx context.Context, ccp *eventingv1alpha1.Cluste return nil } - svc, err := util.CreateDispatcherService(ctx, r.client, ccp) + svc, err := util.CreateDispatcherService(ctx, r.client, ccp, setDispatcherServiceSelector()) if err != nil { logger.Info("Error creating the ClusterChannelProvisioner's K8s Service", zap.Error(err)) @@ -179,6 +180,15 @@ func (r *reconciler) reconcile(ctx context.Context, ccp *eventingv1alpha1.Cluste return nil } +// Since there are two provisioners "in-memry" and "in-memory-channel" but one single dispatcher service deployment, +// update the label of the K8s service to always point at 
the same dispatcher service deployment +func setDispatcherServiceSelector() util.ServiceOption { + return func(svc *v1.Service) error { + svc.Spec.Selector = util.DispatcherLabels("in-memory-channel") + return nil + } +} + func (r *reconciler) deleteOldDispatcherService(ctx context.Context, ccp *eventingv1alpha1.ClusterChannelProvisioner) error { svcName := fmt.Sprintf("%s-clusterbus", ccp.Name) svcKey := types.NamespacedName{ diff --git a/pkg/provisioners/inmemory/clusterchannelprovisioner/reconcile_test.go b/pkg/provisioners/inmemory/clusterchannelprovisioner/reconcile_test.go index e4ff44abb9b..036e1424235 100644 --- a/pkg/provisioners/inmemory/clusterchannelprovisioner/reconcile_test.go +++ b/pkg/provisioners/inmemory/clusterchannelprovisioner/reconcile_test.go @@ -40,10 +40,11 @@ import ( ) const ( - ccpUID = "test-uid" - testErrorMessage = "test-induced-error" - testNS = "test-ns" - Name = "in-memory-channel" + ccpUID = "test-uid" + testErrorMessage = "test-induced-error" + testNS = "test-ns" + inMemoryChannelName = "in-memory-channel" + inMemoryName = "in-memory" ) var ( @@ -96,7 +97,7 @@ func TestIsControlled(t *testing.T) { "wrong namespace": { ref: &corev1.ObjectReference{ Namespace: "other", - Name: Name, + Name: inMemoryName, }, isControlled: false, }, @@ -108,7 +109,7 @@ func TestIsControlled(t *testing.T) { }, "is controlled": { ref: &corev1.ObjectReference{ - Name: Name, + Name: inMemoryName, }, isControlled: true, }, @@ -143,7 +144,7 @@ func TestReconcile(t *testing.T) { &eventingv1alpha1.ClusterChannelProvisioner{ ObjectMeta: metav1.ObjectMeta{ Namespace: "not empty string", - Name: Name, + Name: inMemoryName, }, }, }, @@ -240,6 +241,20 @@ func TestReconcile(t *testing.T) { events[ccpReconciled], }, }, + { + Name: "Create dispatcher succeeds - in-memory-Channel", + ReconcileKey: inMemoryChannelName, + InitialState: []runtime.Object{ + makeClusterChannelProvisionerOld(), + }, + WantPresent: []runtime.Object{ + 
makeReadyClusterChannelProvisionerOld(), + makeK8sServiceOld(), + }, + WantEvent: []corev1.Event{ + events[ccpReconciled], + }, + }, { Name: "Create dispatcher succeeds - request is namespace-scoped", InitialState: []runtime.Object{ @@ -249,7 +264,7 @@ func TestReconcile(t *testing.T) { makeReadyClusterChannelProvisioner(), makeK8sService(), }, - ReconcileKey: fmt.Sprintf("%s/%s", testNS, Name), + ReconcileKey: fmt.Sprintf("%s/%s", testNS, inMemoryName), WantEvent: []corev1.Event{ events[ccpReconciled], }, @@ -297,13 +312,19 @@ func TestReconcile(t *testing.T) { logger: zap.NewNop(), } if tc.ReconcileKey == "" { - tc.ReconcileKey = fmt.Sprintf("/%s", Name) + tc.ReconcileKey = fmt.Sprintf("/%s", inMemoryName) } tc.IgnoreTimes = true t.Run(tc.Name, tc.Runner(t, r, c, recorder)) } } +func makeClusterChannelProvisionerOld() *eventingv1alpha1.ClusterChannelProvisioner { + ccp := makeClusterChannelProvisioner() + ccp.SetName(inMemoryChannelName) + return ccp +} + func makeClusterChannelProvisioner() *eventingv1alpha1.ClusterChannelProvisioner { return &eventingv1alpha1.ClusterChannelProvisioner{ TypeMeta: metav1.TypeMeta{ @@ -311,7 +332,7 @@ func makeClusterChannelProvisioner() *eventingv1alpha1.ClusterChannelProvisioner Kind: "ClusterChannelProvisioner", }, ObjectMeta: metav1.ObjectMeta{ - Name: Name, + Name: inMemoryName, UID: ccpUID, }, Spec: eventingv1alpha1.ClusterChannelProvisionerSpec{}, @@ -328,6 +349,12 @@ func makeReadyClusterChannelProvisioner() *eventingv1alpha1.ClusterChannelProvis return ccp } +func makeReadyClusterChannelProvisionerOld() *eventingv1alpha1.ClusterChannelProvisioner { + ccp := makeReadyClusterChannelProvisioner() + ccp.Name = inMemoryChannelName + return ccp +} + func makeDeletingClusterChannelProvisioner() *eventingv1alpha1.ClusterChannelProvisioner { ccp := makeClusterChannelProvisioner() ccp.DeletionTimestamp = &deletionTime @@ -342,21 +369,21 @@ func makeK8sService() *corev1.Service { }, ObjectMeta: metav1.ObjectMeta{ Namespace: 
system.Namespace(), - Name: fmt.Sprintf("%s-dispatcher", Name), + Name: fmt.Sprintf("%s-dispatcher", inMemoryName), OwnerReferences: []metav1.OwnerReference{ { APIVersion: eventingv1alpha1.SchemeGroupVersion.String(), Kind: "ClusterChannelProvisioner", - Name: Name, + Name: inMemoryName, UID: ccpUID, Controller: &truePointer, BlockOwnerDeletion: &truePointer, }, }, - Labels: util.DispatcherLabels(Name), + Labels: util.DispatcherLabels(inMemoryName), }, Spec: corev1.ServiceSpec{ - Selector: util.DispatcherLabels(Name), + Selector: util.DispatcherLabels(inMemoryChannelName), Ports: []corev1.ServicePort{ { Name: "http", @@ -368,9 +395,17 @@ func makeK8sService() *corev1.Service { } } +func makeK8sServiceOld() *corev1.Service { + svc := makeK8sService() + svc.SetName(fmt.Sprintf("%s-dispatcher", inMemoryChannelName)) + svc.GetOwnerReferences()[0].Name = inMemoryChannelName + svc.SetLabels(util.DispatcherLabels(inMemoryChannelName)) + return svc +} + func makeOldK8sService() *corev1.Service { svc := makeK8sService() - svc.ObjectMeta.Name = fmt.Sprintf("%s-clusterbus", Name) + svc.ObjectMeta.Name = fmt.Sprintf("%s-clusterbus", inMemoryName) return svc } diff --git a/pkg/provisioners/provisioner_util.go b/pkg/provisioners/provisioner_util.go index 4afe9d4aea0..7003250345c 100644 --- a/pkg/provisioners/provisioner_util.go +++ b/pkg/provisioners/provisioner_util.go @@ -5,6 +5,7 @@ import ( "go.uber.org/zap" corev1 "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/equality" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" @@ -19,7 +20,10 @@ import ( "github.com/knative/pkg/system" ) -func CreateDispatcherService(ctx context.Context, client runtimeClient.Client, ccp *eventingv1alpha1.ClusterChannelProvisioner) (*corev1.Service, error) { +// ServiceOption can be used to optionally modify the K8s default that gets created for the Dispatcher in CreateDispatcherService +type ServiceOption func(*v1.Service) error + 
+func CreateDispatcherService(ctx context.Context, client runtimeClient.Client, ccp *eventingv1alpha1.ClusterChannelProvisioner, opts ...ServiceOption) (*corev1.Service, error) { svcKey := types.NamespacedName{ Namespace: system.Namespace(), Name: channelDispatcherServiceName(ccp.Name), @@ -29,7 +33,12 @@ func CreateDispatcherService(ctx context.Context, client runtimeClient.Client, c err := client.Get(ctx, svcKey, svc) return svc, err } - return createK8sService(ctx, client, getSvc, newDispatcherService(ccp)) + svc, err := newDispatcherService(ccp, opts...) + if err != nil { + return nil, err + } + + return createK8sService(ctx, client, getSvc, svc) } func UpdateClusterChannelProvisionerStatus(ctx context.Context, client runtimeClient.Client, u *eventingv1alpha1.ClusterChannelProvisioner) error { @@ -50,9 +59,9 @@ func UpdateClusterChannelProvisionerStatus(ctx context.Context, client runtimeCl // newDispatcherService creates a new Service for a ClusterChannelProvisioner resource. It also sets // the appropriate OwnerReferences on the resource so handleObject can discover // the ClusterChannelProvisioner resource that 'owns' it. 
-func newDispatcherService(ccp *eventingv1alpha1.ClusterChannelProvisioner) *corev1.Service { +func newDispatcherService(ccp *eventingv1alpha1.ClusterChannelProvisioner, opts ...ServiceOption) (*corev1.Service, error) { labels := DispatcherLabels(ccp.Name) - return &corev1.Service{ + svc := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: channelDispatcherServiceName(ccp.Name), Namespace: system.Namespace(), @@ -76,6 +85,13 @@ func newDispatcherService(ccp *eventingv1alpha1.ClusterChannelProvisioner) *core }, }, } + + for _, opt := range opts { + if err := opt(svc); err != nil { + return svc, err + } + } + return svc, nil } func DispatcherLabels(ccpName string) map[string]string { diff --git a/pkg/sidecar/configmap/filesystem/filesystem_watcher.go b/pkg/sidecar/configmap/filesystem/filesystem_watcher.go new file mode 100644 index 00000000000..12f5042d51e --- /dev/null +++ b/pkg/sidecar/configmap/filesystem/filesystem_watcher.go @@ -0,0 +1,126 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package filesystem + +import ( + "errors" + + "github.com/fsnotify/fsnotify" + sidecarconfigmap "github.com/knative/eventing/pkg/sidecar/configmap" + "github.com/knative/eventing/pkg/sidecar/multichannelfanout" + "github.com/knative/eventing/pkg/sidecar/swappable" + "github.com/knative/pkg/configmap" + "go.uber.org/zap" +) + +const ( + // ConfigDir is the mount path of the configMap volume. 
+ ConfigDir = "/etc/config/fanout_sidecar" +) + +// Monitors an attached ConfigMap volume for updated configuration and calls `configUpdated` when +// the value changes. +type ConfigMapWatcher struct { + logger *zap.Logger + // The directory to read the configMap from. + dir string + // Stop the watcher by closing this channel. + watcherStopCh chan<- bool + + // The function to call when the configuration is updated. + configUpdated swappable.UpdateConfig +} + +// NewConfigMapWatcher creates a new filesystem.ConfigMapWatcher. The caller is responsible for +// calling Start(<-chan), likely via a controller-runtime Manager. +func NewConfigMapWatcher(logger *zap.Logger, dir string, updateConfig swappable.UpdateConfig) (*ConfigMapWatcher, error) { + conf, err := readConfigMap(logger, dir) + if err != nil { + logger.Error("Unable to read configMap", zap.Error(err)) + return nil, err + } + + logger.Info("Read initial configMap", zap.Any("conf", conf)) + + err = updateConfig(conf) + if err != nil { + logger.Error("Unable to use the initial configMap: %v", zap.Error(err)) + return nil, err + } + + cmw := &ConfigMapWatcher{ + logger: logger, + dir: dir, + configUpdated: updateConfig, + } + return cmw, nil +} + +// readConfigMap attempts to read the configMap from the attached volume. +func readConfigMap(logger *zap.Logger, dir string) (*multichannelfanout.Config, error) { + cm, err := configmap.Load(dir) + if err != nil { + return nil, err + } + return sidecarconfigmap.NewFanoutConfig(logger, cm) +} + +// updateConfig reads the configMap data and calls `configUpdated` with the updated value. +func (cmw *ConfigMapWatcher) updateConfig() { + conf, err := readConfigMap(cmw.logger, cmw.dir) + if err != nil { + cmw.logger.Error("Unable to read the configMap", zap.Error(err)) + return + } + err = cmw.configUpdated(conf) + if err != nil { + cmw.logger.Error("Unable to update config", zap.Error(err)) + return + } +} + +// Start implements controller runtime's manager.Runnable. 
+func (cmw *ConfigMapWatcher) Start(stopCh <-chan struct{}) error { + watcher, err := fsnotify.NewWatcher() + if err != nil { + return err + } + + err = watcher.Add(cmw.dir) + if err != nil { + return err + } + + for { + select { + case _, ok := <-watcher.Events: + if !ok { + // Channel closed. + return errors.New("watcher.Events channel closed") + } + cmw.updateConfig() + case e, ok := <-watcher.Errors: + if !ok { + // Channel closed. + return errors.New("watcher.Errors channel closed") + } + cmw.logger.Error("watcher.Errors", zap.Error(e)) + case <-stopCh: + return watcher.Close() + } + } +} diff --git a/pkg/sidecar/configmap/filesystem/filesystem_watcher_test.go b/pkg/sidecar/configmap/filesystem/filesystem_watcher_test.go new file mode 100644 index 00000000000..84a0ac83912 --- /dev/null +++ b/pkg/sidecar/configmap/filesystem/filesystem_watcher_test.go @@ -0,0 +1,379 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package filesystem + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "strings" + "sync" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + eventingduck "github.com/knative/eventing/pkg/apis/duck/v1alpha1" + "github.com/knative/eventing/pkg/sidecar/configmap" + "github.com/knative/eventing/pkg/sidecar/fanout" + "github.com/knative/eventing/pkg/sidecar/multichannelfanout" + "github.com/knative/eventing/pkg/utils" + "go.uber.org/zap" + yaml "gopkg.in/yaml.v2" +) + +func TestReadConfigMap(t *testing.T) { + testCases := []struct { + name string + createDir bool + config string + expected *multichannelfanout.Config + expectedErr bool + }{ + { + name: "dir does not exist", + createDir: false, + }, + { + name: "no data", + createDir: true, + expectedErr: true, + }, + { + name: "invalid YAML", + createDir: true, + config: ` + key: + - value + - different indent level + `, + expectedErr: true, + }, + { + name: "valid YAML -- invalid JSON", + config: "{ nil: Key }", + createDir: true, + expectedErr: true, + }, + { + name: "unknown field", + config: "{ channelConfigs: [ { not: a-defined-field } ] }", + createDir: true, + expectedErr: true, + }, + { + name: "valid", + createDir: true, + config: ` + channelConfigs: + - namespace: default + name: c1 + fanoutConfig: + subscriptions: + - subscriberURI: event-changer.default.svc.` + utils.GetClusterDomainName() + ` + replyURI: message-dumper-bar.default.svc.` + utils.GetClusterDomainName() + ` + - subscriberURI: message-dumper-foo.default.svc.` + utils.GetClusterDomainName() + ` + - replyURI: message-dumper-bar.default.svc.` + utils.GetClusterDomainName() + ` + - namespace: default + name: c2 + fanoutConfig: + subscriptions: + - replyURI: message-dumper-foo.default.svc.` + utils.GetClusterDomainName() + ` + - namespace: other + name: c3 + fanoutConfig: + subscriptions: + - replyURI: message-dumper-foo.default.svc.` + utils.GetClusterDomainName(), + expected: &multichannelfanout.Config{ + ChannelConfigs: 
[]multichannelfanout.ChannelConfig{ + { + Namespace: "default", + Name: "c1", + FanoutConfig: fanout.Config{ + Subscriptions: []eventingduck.ChannelSubscriberSpec{ + { + SubscriberURI: "event-changer.default.svc." + utils.GetClusterDomainName(), + ReplyURI: "message-dumper-bar.default.svc." + utils.GetClusterDomainName(), + }, + { + SubscriberURI: "message-dumper-foo.default.svc." + utils.GetClusterDomainName(), + }, + { + ReplyURI: "message-dumper-bar.default.svc." + utils.GetClusterDomainName(), + }, + }, + }, + }, + { + Namespace: "default", + Name: "c2", + FanoutConfig: fanout.Config{ + Subscriptions: []eventingduck.ChannelSubscriberSpec{ + { + ReplyURI: "message-dumper-foo.default.svc." + utils.GetClusterDomainName(), + }, + }, + }, + }, + { + Namespace: "other", + Name: "c3", + FanoutConfig: fanout.Config{ + Subscriptions: []eventingduck.ChannelSubscriberSpec{ + { + ReplyURI: "message-dumper-foo.default.svc." + utils.GetClusterDomainName(), + }, + }, + }, + }, + }, + }, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + var dir string + if tc.createDir { + var cleanup func() + dir, cleanup = createTempDir(t) + defer cleanup() + } else { + dir = "/tmp/doesNotExist" + } + writeConfigString(t, dir, tc.config) + c, e := readConfigMap(zap.NewNop(), dir) + if tc.expectedErr { + if e == nil { + t.Errorf("Expected an error, actual nil") + } + return + } + if !cmp.Equal(c, tc.expected) { + t.Errorf("Unexpected config. Expected '%v'. 
Actual '%v'.", tc.expected, c) + } + }) + } +} + +func TestWatch(t *testing.T) { + testCases := map[string]struct { + initialConfigErr error + initialConfig *multichannelfanout.Config + updateConfigErr error + updateConfig *multichannelfanout.Config + }{ + "error applying initial config": { + initialConfig: &multichannelfanout.Config{}, + initialConfigErr: errors.New("test-induced error"), + }, + "read initial config": { + initialConfig: &multichannelfanout.Config{ + ChannelConfigs: []multichannelfanout.ChannelConfig{ + { + Namespace: "default", + Name: "c1", + FanoutConfig: fanout.Config{ + Subscriptions: []eventingduck.ChannelSubscriberSpec{ + { + ReplyURI: "foo.bar", + }, + }, + }, + }, + }, + }, + }, + "error apply updated config": { + initialConfig: &multichannelfanout.Config{ + ChannelConfigs: []multichannelfanout.ChannelConfig{ + { + Namespace: "default", + Name: "c1", + FanoutConfig: fanout.Config{ + Subscriptions: []eventingduck.ChannelSubscriberSpec{ + { + ReplyURI: "foo.bar", + }, + }, + }, + }, + }, + }, + updateConfigErr: errors.New("test-induced error"), + }, + "update config": { + initialConfig: &multichannelfanout.Config{ + ChannelConfigs: []multichannelfanout.ChannelConfig{ + { + Namespace: "default", + Name: "c1", + FanoutConfig: fanout.Config{ + Subscriptions: []eventingduck.ChannelSubscriberSpec{ + { + ReplyURI: "foo.bar", + }, + }, + }, + }, + }, + }, + updateConfig: &multichannelfanout.Config{ + ChannelConfigs: []multichannelfanout.ChannelConfig{ + { + Namespace: "default", + Name: "new-channel", + FanoutConfig: fanout.Config{ + Subscriptions: []eventingduck.ChannelSubscriberSpec{ + { + SubscriberURI: "baz.qux", + }, + }, + }, + }, + }, + }, + }, + } + for n, tc := range testCases { + t.Run(n, func(t *testing.T) { + dir, cleanup := createTempDir(t) + defer cleanup() + writeConfig(t, dir, tc.initialConfig) + + cuc := &configUpdatedChecker{ + updateConfigErr: tc.initialConfigErr, + } + cmw, err := NewConfigMapWatcher(zap.NewNop(), dir, 
cuc.updateConfig) + if err != nil { + if tc.initialConfigErr != err { + t.Errorf("Unexpected error making ConfigMapWatcher. Expected: '%v'. Actual '%v'", tc.initialConfigErr, err) + } + return + } + ac := cuc.getConfig() + if !cmp.Equal(tc.initialConfig, ac) { + t.Errorf("Unexpected initial config. Expected '%v'. Actual '%v'", tc.initialConfig, ac) + } + + stopCh := make(chan struct{}) + go func() { + _ = cmw.Start(stopCh) + }() + defer func() { + close(stopCh) + }() + // Sadly, the test is flaky unless we sleep here, waiting for the file system + // watcher to truly start. + time.Sleep(100 * time.Millisecond) + + if tc.updateConfigErr != nil { + cuc.updateConfigErr = tc.updateConfigErr + } + + expected := tc.initialConfig + if tc.updateConfig != nil { + expected = tc.updateConfig + } + + cuc.updateCalled = make(chan struct{}, 1) + writeConfig(t, dir, expected) + // The watcher is running in another goroutine, give it some time to notice the + // change. + select { + case <-cuc.updateCalled: + break + case <-time.After(5 * time.Second): + t.Errorf("Time out waiting for watcher to notice change.") + } + + ac = cuc.getConfig() + if !cmp.Equal(ac, expected) { + t.Errorf("Unexpected update config. Expected '%v'. 
Actual '%v'", expected, ac) + } + }) + } +} + +type configUpdatedChecker struct { + configLock sync.Mutex + config *multichannelfanout.Config + updateCalled chan struct{} + updateConfigErr error +} + +func (cuc *configUpdatedChecker) updateConfig(config *multichannelfanout.Config) error { + cuc.configLock.Lock() + defer cuc.configLock.Unlock() + cuc.config = config + if cuc.updateCalled != nil { + cuc.updateCalled <- struct{}{} + } + return cuc.updateConfigErr +} + +func (cuc *configUpdatedChecker) getConfig() *multichannelfanout.Config { + cuc.configLock.Lock() + defer cuc.configLock.Unlock() + return cuc.config +} + +func createTempDir(t *testing.T) (string, func()) { + dir, err := ioutil.TempDir("", "configMapHandlerTest") + if err != nil { + t.Errorf("Unable to make temp directory: %v", err) + } + return dir, func() { + _ = os.RemoveAll(dir) + } +} + +func writeConfig(t *testing.T, dir string, config *multichannelfanout.Config) { + if config != nil { + yb, err := yaml.Marshal(config) + if err != nil { + t.Errorf("Unable to marshal the config") + } + writeConfigString(t, dir, string(yb)) + } +} + +func writeConfigString(t *testing.T, dir, config string) { + if config != "" { + // Golang editors tend to replace leading spaces with tabs. YAML is left whitespace + // sensitive, so let's replace the tabs with spaces. + leftSpaceConfig := strings.Replace(config, "\t", " ", -1) + err := atomicWriteFile(t, fmt.Sprintf("%s/%s", dir, configmap.MultiChannelFanoutConfigKey), []byte(leftSpaceConfig), 0700) + if err != nil { + t.Errorf("Problem writing the config file: %v", err) + } + } +} + +func atomicWriteFile(t *testing.T, file string, bytes []byte, perm os.FileMode) error { + // In order to more closely replicate how K8s writes ConfigMaps to the file system, we will + // atomically swap out the file by writing it to a temp directory, then renaming it into the + // directory we are watching. 
+ tempDir, cleanup := createTempDir(t) + defer cleanup() + + tempFile := fmt.Sprintf("%s/%s", tempDir, "temp") + err := ioutil.WriteFile(tempFile, bytes, perm) + if err != nil { + return err + } + return os.Rename(tempFile, file) +} diff --git a/pkg/sidecar/configmap/parse.go b/pkg/sidecar/configmap/parse.go new file mode 100644 index 00000000000..ba6da64f12c --- /dev/null +++ b/pkg/sidecar/configmap/parse.go @@ -0,0 +1,54 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package configmap + +import ( + "encoding/json" + "fmt" + + "github.com/knative/eventing/pkg/sidecar/multichannelfanout" + "go.uber.org/zap" +) + +const ( + // MultiChannelFanoutConfigKey is the key in the ConfigMap that contains all the configuration + // data. + MultiChannelFanoutConfigKey = "multiChannelFanoutConfig" +) + +// NewFanoutConfig attempts to parse the config map's data into a multichannelfanout.Config. +// orig == NewFanoutConfig(SerializeConfig(orig)) +func NewFanoutConfig(logger *zap.Logger, data map[string]string) (*multichannelfanout.Config, error) { + str, present := data[MultiChannelFanoutConfigKey] + if !present { + logger.Error("Expected key not found", zap.String("key", MultiChannelFanoutConfigKey)) + return nil, fmt.Errorf("expected key not found: %v", MultiChannelFanoutConfigKey) + } + return multichannelfanout.Parse(logger, str) +} + +// SerializeConfig takes in a multichannelfanout.Config and generates the ConfigMap equivalent. 
+// orig == NewFanoutConfig(SerializeConfig(orig)) +func SerializeConfig(config multichannelfanout.Config) (map[string]string, error) { + jb, err := json.Marshal(config) + if err != nil { + return nil, err + } + return map[string]string{ + MultiChannelFanoutConfigKey: string(jb), + }, nil +} diff --git a/pkg/sidecar/configmap/parse_test.go b/pkg/sidecar/configmap/parse_test.go new file mode 100644 index 00000000000..cee271ce090 --- /dev/null +++ b/pkg/sidecar/configmap/parse_test.go @@ -0,0 +1,213 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package configmap + +import ( + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + eventingduck "github.com/knative/eventing/pkg/apis/duck/v1alpha1" + "github.com/knative/eventing/pkg/sidecar/fanout" + "github.com/knative/eventing/pkg/sidecar/multichannelfanout" + "github.com/knative/eventing/pkg/utils" + "go.uber.org/zap" +) + +func TestNewFanoutConfig(t *testing.T) { + testCases := []struct { + name string + config string + expected *multichannelfanout.Config + expectedErr bool + }{ + { + name: "no data", + expectedErr: true, + }, + { + name: "invalid YAML", + config: ` + key: + - value + - different indent level + `, + expectedErr: true, + }, + { + name: "valid YAML -- invalid JSON", + config: "{ nil: Key }", + expectedErr: true, + }, + { + name: "unknown field", + config: "{ channelConfigs: [ { not: a-defined-field } ] }", + expectedErr: true, + }, + { + name: "valid", + config: ` + channelConfigs: + - namespace: default + name: c1 + fanoutConfig: + subscriptions: + - subscriberURI: event-changer.default.svc.` + utils.GetClusterDomainName() + ` + replyURI: message-dumper-bar.default.svc.` + utils.GetClusterDomainName() + ` + - subscriberURI: message-dumper-foo.default.svc.` + utils.GetClusterDomainName() + ` + - replyURI: message-dumper-bar.default.svc.` + utils.GetClusterDomainName() + ` + - namespace: default + name: c2 + fanoutConfig: + subscriptions: + - replyURI: message-dumper-foo.default.svc.` + utils.GetClusterDomainName() + ` + - namespace: other + name: c3 + fanoutConfig: + subscriptions: + - replyURI: message-dumper-foo.default.svc.` + utils.GetClusterDomainName(), + expected: &multichannelfanout.Config{ + ChannelConfigs: []multichannelfanout.ChannelConfig{ + { + Namespace: "default", + Name: "c1", + FanoutConfig: fanout.Config{ + Subscriptions: []eventingduck.ChannelSubscriberSpec{ + { + SubscriberURI: "event-changer.default.svc." + utils.GetClusterDomainName(), + ReplyURI: "message-dumper-bar.default.svc." 
+ utils.GetClusterDomainName(), + }, + { + SubscriberURI: "message-dumper-foo.default.svc." + utils.GetClusterDomainName(), + }, + { + ReplyURI: "message-dumper-bar.default.svc." + utils.GetClusterDomainName(), + }, + }, + }, + }, + { + Namespace: "default", + Name: "c2", + FanoutConfig: fanout.Config{ + Subscriptions: []eventingduck.ChannelSubscriberSpec{ + { + ReplyURI: "message-dumper-foo.default.svc." + utils.GetClusterDomainName(), + }, + }, + }, + }, + { + Namespace: "other", + Name: "c3", + FanoutConfig: fanout.Config{ + Subscriptions: []eventingduck.ChannelSubscriberSpec{ + { + ReplyURI: "message-dumper-foo.default.svc." + utils.GetClusterDomainName(), + }, + }, + }, + }, + }, + }, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + data := formatData(tc.config) + c, e := NewFanoutConfig(zap.NewNop(), data) + if tc.expectedErr { + if e == nil { + t.Errorf("Expected an error, actual nil") + } + return + } + if !cmp.Equal(c, tc.expected) { + t.Errorf("Unexpected config. Expected '%v'. 
Actual '%v'.", tc.expected, c) + } + }) + } +} + +func TestSerializeConfig(t *testing.T) { + testCases := map[string]struct { + config *multichannelfanout.Config + }{ + "empty config": { + config: &multichannelfanout.Config{}, + }, + "full config": { + config: &multichannelfanout.Config{ + ChannelConfigs: []multichannelfanout.ChannelConfig{ + { + Namespace: "default", + Name: "c1", + FanoutConfig: fanout.Config{ + Subscriptions: []eventingduck.ChannelSubscriberSpec{ + { + SubscriberURI: "foo.example.com", + ReplyURI: "bar.example.com", + }, + { + ReplyURI: "qux.example.com", + }, + { + SubscriberURI: "baz.example.com", + }, + {}, + }, + }, + }, + { + Namespace: "other", + Name: "no-subs", + FanoutConfig: fanout.Config{ + Subscriptions: []eventingduck.ChannelSubscriberSpec{}, + }, + }, + }, + }, + }, + } + + for n, tc := range testCases { + t.Run(n, func(t *testing.T) { + s, err := SerializeConfig(*tc.config) + if err != nil { + t.Errorf("Unexpected error serializing config: %v", err) + } + rt, err := NewFanoutConfig(zap.NewNop(), s) + if err != nil { + t.Errorf("Unexpected error deserializing: %v", err) + } + if diff := cmp.Diff(tc.config, rt); diff != "" { + t.Errorf("Unexpected error roundtripping the config (-want, +got): %v", diff) + } + }) + } +} + +func formatData(config string) map[string]string { + data := make(map[string]string) + if config != "" { + // Golang editors tend to replace leading spaces with tabs. YAML is left whitespace + // sensitive and disallows tabs, so let's replace the tabs with four spaces. 
+ leftSpaceConfig := strings.Replace(config, "\t", " ", -1) + data[MultiChannelFanoutConfigKey] = leftSpaceConfig + } + return data +} diff --git a/pkg/sidecar/configmap/watcher/watcher.go b/pkg/sidecar/configmap/watcher/watcher.go new file mode 100644 index 00000000000..01dc5d7af9a --- /dev/null +++ b/pkg/sidecar/configmap/watcher/watcher.go @@ -0,0 +1,49 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package watcher + +import ( + sidecarconfigmap "github.com/knative/eventing/pkg/sidecar/configmap" + "github.com/knative/eventing/pkg/sidecar/swappable" + "github.com/knative/pkg/configmap" + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/kubernetes" + "sigs.k8s.io/controller-runtime/pkg/manager" +) + +// NewWatcher creates a new InformedWatcher that watches the specified ConfigMap and on any change +// that results in a valid multichannelfanout.Config calls configUpdated. 
+func NewWatcher(logger *zap.Logger, kc kubernetes.Interface, cmNamespace, cmName string, configUpdated swappable.UpdateConfig) (manager.Runnable, error) { + iw := configmap.NewInformedWatcher(kc, cmNamespace) + iw.Watch(cmName, func(cm *corev1.ConfigMap) { + config, err := sidecarconfigmap.NewFanoutConfig(logger, cm.Data) + if err != nil { + logger.Error("Could not parse ConfigMap", zap.Error(err), + zap.Any("configMap.Data", cm.Data)) + return + } + + err = configUpdated(config) + if err != nil { + logger.Error("Unable to update config", zap.Error(err)) + return + } + }) + + return iw, nil +} diff --git a/pkg/sidecar/configmap/watcher/watcher_test.go b/pkg/sidecar/configmap/watcher/watcher_test.go new file mode 100644 index 00000000000..6164c38cd63 --- /dev/null +++ b/pkg/sidecar/configmap/watcher/watcher_test.go @@ -0,0 +1,125 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package watcher + +import ( + "errors" + "testing" + + "github.com/google/go-cmp/cmp" + eventingduck "github.com/knative/eventing/pkg/apis/duck/v1alpha1" + sidecarconfigmap "github.com/knative/eventing/pkg/sidecar/configmap" + "github.com/knative/eventing/pkg/sidecar/fanout" + "github.com/knative/eventing/pkg/sidecar/multichannelfanout" + "github.com/knative/pkg/configmap" + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + namespace = "test-namespace" + name = "test-name" +) + +func TestReconcile(t *testing.T) { + testCases := map[string]struct { + config map[string]string + updateConfigErr error + expectedConfig *multichannelfanout.Config + }{ + "missing key": { + config: map[string]string{}, + expectedConfig: nil, + }, + "cannot parse cm": { + config: map[string]string{ + sidecarconfigmap.MultiChannelFanoutConfigKey: "invalid config", + }, + expectedConfig: nil, + }, + "configUpdated fails": { + config: map[string]string{ + sidecarconfigmap.MultiChannelFanoutConfigKey: "", + }, + updateConfigErr: errors.New("test-error"), + expectedConfig: &multichannelfanout.Config{}, + }, + "success": { + config: map[string]string{ + sidecarconfigmap.MultiChannelFanoutConfigKey: ` + channelConfigs: + - name: foo + namespace: bar + fanoutConfig: + subscriptions: + - subscriberURI: subscriber + replyURI: reply`, + }, + expectedConfig: &multichannelfanout.Config{ + ChannelConfigs: []multichannelfanout.ChannelConfig{ + { + Name: "foo", + Namespace: "bar", + FanoutConfig: fanout.Config{ + Subscriptions: []eventingduck.ChannelSubscriberSpec{ + { + SubscriberURI: "subscriber", + ReplyURI: "reply", + }, + }, + }, + }, + }, + }, + }, + } + for n, tc := range testCases { + t.Run(n, func(t *testing.T) { + cuc := &configUpdatedChecker{ + updateConfigErr: tc.updateConfigErr, + } + + r, err := NewWatcher(zap.NewNop(), nil, namespace, name, cuc.updateConfig) + if err != nil { + t.Errorf("Error creating watcher: %v", err) + 
} + iw := r.(*configmap.InformedWatcher) + iw.OnChange(&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: name, + }, + Data: tc.config, + }) + + if diff := cmp.Diff(tc.expectedConfig, cuc.config); diff != "" { + t.Errorf("Unexpected config (-want +got): %v", diff) + } + }) + } +} + +type configUpdatedChecker struct { + config *multichannelfanout.Config + updateConfigErr error +} + +func (cuc *configUpdatedChecker) updateConfig(config *multichannelfanout.Config) error { + cuc.config = config + return cuc.updateConfigErr +} diff --git a/pkg/sidecar/multichannelfanout/multi_channel_fanout_handler_test.go b/pkg/sidecar/multichannelfanout/multi_channel_fanout_handler_test.go index e6c9c30d048..32b86bdc84a 100644 --- a/pkg/sidecar/multichannelfanout/multi_channel_fanout_handler_test.go +++ b/pkg/sidecar/multichannelfanout/multi_channel_fanout_handler_test.go @@ -34,33 +34,6 @@ const ( replaceDomain = "replaceDomain" ) -func TestMakeChannelKey(t *testing.T) { - testCases := []struct { - namespace string - name string - key string - }{ - { - namespace: "default", - name: "channel", - key: "default/channel", - }, - { - namespace: "foo", - name: "bar", - key: "foo/bar", - }, - } - for _, tc := range testCases { - name := fmt.Sprintf("%s, %s -> %s", tc.namespace, tc.name, tc.key) - t.Run(name, func(t *testing.T) { - if key := makeChannelKey(tc.namespace, tc.name); key != tc.key { - t.Errorf("Unexpected ChannelKey. Expected '%v'. 
Actual '%v'", tc.key, key) - } - }) - } -} - func TestNewHandler(t *testing.T) { testCases := []struct { name string @@ -72,16 +45,14 @@ func TestNewHandler(t *testing.T) { config: Config{ ChannelConfigs: []ChannelConfig{ { - Namespace: "default", - Name: "duplicate", + HostName: "duplicatekey", }, { - Namespace: "default", - Name: "duplicate", + HostName: "duplicatekey", }, }, }, - createErr: "duplicate channel key: default/duplicate", + createErr: "duplicate channel key: duplicatekey", }, } @@ -241,8 +212,9 @@ func TestServeHTTP(t *testing.T) { config: Config{ ChannelConfigs: []ChannelConfig{ { - Namespace: "default", - Name: "first-channel", + Namespace: "ns", + Name: "name", + HostName: "first-channel.default", FanoutConfig: fanout.Config{ Subscriptions: []eventingduck.ChannelSubscriberSpec{ { @@ -261,8 +233,10 @@ func TestServeHTTP(t *testing.T) { config: Config{ ChannelConfigs: []ChannelConfig{ { - Namespace: "default", - Name: "first-channel", + + Namespace: "ns", + Name: "name", + HostName: "first-channel.default", FanoutConfig: fanout.Config{ Subscriptions: []eventingduck.ChannelSubscriberSpec{ { @@ -274,6 +248,7 @@ func TestServeHTTP(t *testing.T) { { Namespace: "default", Name: "second-channel", + HostName: "second-channel.default", FanoutConfig: fanout.Config{ Subscriptions: []eventingduck.ChannelSubscriberSpec{ { @@ -303,7 +278,7 @@ func TestServeHTTP(t *testing.T) { h, err := NewHandler(zap.NewNop(), tc.config) if err != nil { - t.Errorf("Unexpected NewHandler error: '%v'", err) + t.Fatalf("Unexpected NewHandler error: '%v'", err) } r := requestWithChannelKey(tc.key) diff --git a/pkg/sidecar/swappable/swappable_test.go b/pkg/sidecar/swappable/swappable_test.go index 7ee97d00955..b4cc0daa872 100644 --- a/pkg/sidecar/swappable/swappable_test.go +++ b/pkg/sidecar/swappable/swappable_test.go @@ -30,9 +30,8 @@ import ( ) const ( - namespace = "default" - name = "channel1" replaceDomain = "replaceDomain" + hostName = "a.b.c.d" ) func TestHandler(t 
*testing.T) { @@ -44,8 +43,7 @@ func TestHandler(t *testing.T) { { ChannelConfigs: []multichannelfanout.ChannelConfig{ { - Namespace: namespace, - Name: name, + HostName: hostName, FanoutConfig: fanout.Config{ Subscriptions: []eventingduck.ChannelSubscriberSpec{ { @@ -59,8 +57,7 @@ func TestHandler(t *testing.T) { { ChannelConfigs: []multichannelfanout.ChannelConfig{ { - Namespace: namespace, - Name: name, + HostName: hostName, FanoutConfig: fanout.Config{ Subscriptions: []eventingduck.ChannelSubscriberSpec{ { @@ -96,8 +93,7 @@ func TestHandler_InvalidConfigChange(t *testing.T) { initialConfig: multichannelfanout.Config{ ChannelConfigs: []multichannelfanout.ChannelConfig{ { - Namespace: namespace, - Name: name, + HostName: hostName, FanoutConfig: fanout.Config{ Subscriptions: []eventingduck.ChannelSubscriberSpec{ { @@ -112,12 +108,10 @@ func TestHandler_InvalidConfigChange(t *testing.T) { // Duplicate (namespace, name). ChannelConfigs: []multichannelfanout.ChannelConfig{ { - Namespace: namespace, - Name: name, + HostName: hostName, }, { - Namespace: namespace, - Name: name, + HostName: hostName, }, }, }, @@ -183,7 +177,7 @@ func updateConfigAndTest(t *testing.T, h *Handler, config multichannelfanout.Con func assertRequestAccepted(t *testing.T, h *Handler) { w := httptest.NewRecorder() - h.ServeHTTP(w, makeRequest(namespace, name)) + h.ServeHTTP(w, makeRequest(hostName)) if w.Code != http.StatusAccepted { t.Errorf("Unexpected response code. Expected 202. 
Actual %v", w.Code) } @@ -196,8 +190,8 @@ func (*successHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { _ = r.Body.Close() } -func makeRequest(namespace, name string) *http.Request { - r := httptest.NewRequest("POST", fmt.Sprintf("http://%s.%s/", name, namespace), strings.NewReader("")) +func makeRequest(hostName string) *http.Request { + r := httptest.NewRequest("POST", fmt.Sprintf("http://%s/", hostName), strings.NewReader("")) return r } From bd7ae6832f033bea79a57224b6a004e4d74e5d52 Mon Sep 17 00:00:00 2001 From: Akash Verenkar Date: Tue, 9 Apr 2019 17:28:41 -0700 Subject: [PATCH 04/26] UTs pass, E2E tests pass with in-memory as well as kafka --- cmd/broker/ingress/main.go | 17 ----------------- cmd/fanoutsidecar/main.go | 3 +-- .../{channelwatcher.go => channel_watcher.go} | 0 pkg/provisioners/channel_util.go | 12 +++++++----- .../clusterchannelprovisioner/reconcile.go | 2 +- pkg/provisioners/provisioner_util.go | 5 ++++- .../v1alpha1/broker/resources/ingress.go | 7 ++++--- .../multi_channel_fanout_handler.go | 6 +++--- pkg/sidecar/swappable/swappable.go | 4 ---- 9 files changed, 20 insertions(+), 36 deletions(-) rename pkg/channelwatcher/{channelwatcher.go => channel_watcher.go} (100%) diff --git a/cmd/broker/ingress/main.go b/cmd/broker/ingress/main.go index efafda14aae..ea0094fba73 100644 --- a/cmd/broker/ingress/main.go +++ b/cmd/broker/ingress/main.go @@ -17,12 +17,10 @@ package main import ( - "bytes" "context" "errors" "flag" "fmt" - "io/ioutil" "log" "net/http" "net/url" @@ -223,20 +221,6 @@ func (h *handler) serveHTTP(ctx context.Context, event cloudevents.Event, resp * } func (h *handler) sendEvent(ctx context.Context, tctx cehttp.TransportContext, event cloudevents.Event) error { - - //url := "http://external-service.knative-eventing.svc.cluster.local" - resp, err1 := http.Post(h.channelURI.String(), "application/json", bytes.NewBuffer([]byte{})) - if err1 != nil { - log.Println("Error:", err1) - } - body, err1 := 
ioutil.ReadAll(resp.Body) - if err1 != nil { - log.Fatalln(err1) - } - log.Println(fmt.Sprintf("Reponse: %+v", resp)) - log.Println(fmt.Sprintf("ReponseBody from server: %v", string(body))) - - fmt.Println("ChannelURI: ", h.channelURI) sendingCTX := broker.SendingContext(ctx, tctx, h.channelURI) startTS := time.Now() @@ -248,7 +232,6 @@ func (h *handler) sendEvent(ctx context.Context, tctx cehttp.TransportContext, e _, err := h.ceClient.Send(sendingCTX, event) if err != nil { sendingCTX, _ = tag.New(sendingCTX, tag.Insert(TagResult, "error")) - fmt.Println("Error: ", err) } else { sendingCTX, _ = tag.New(sendingCTX, tag.Insert(TagResult, "ok")) } diff --git a/cmd/fanoutsidecar/main.go b/cmd/fanoutsidecar/main.go index 9787fdfaecb..6392dd91e57 100644 --- a/cmd/fanoutsidecar/main.go +++ b/cmd/fanoutsidecar/main.go @@ -40,9 +40,8 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/config" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/runtime/signals" - // uncomment this line to debug in GKE from local machine - _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + //_ "k8s.io/client-go/plugin/pkg/client/auth/gcp" ) var ( diff --git a/pkg/channelwatcher/channelwatcher.go b/pkg/channelwatcher/channel_watcher.go similarity index 100% rename from pkg/channelwatcher/channelwatcher.go rename to pkg/channelwatcher/channel_watcher.go diff --git a/pkg/provisioners/channel_util.go b/pkg/provisioners/channel_util.go index d52581c8116..1fc8026dd27 100644 --- a/pkg/provisioners/channel_util.go +++ b/pkg/provisioners/channel_util.go @@ -123,12 +123,14 @@ func createK8sService(ctx context.Context, client runtimeClient.Client, getSvc g } else if err != nil { return nil, err } - // spec.clusterIP is immutable and is set on existing services. If we don't set this // to the same value, we will encounter an error while updating. 
svc.Spec.ClusterIP = current.Spec.ClusterIP if !equality.Semantic.DeepDerivative(svc.Spec, current.Spec) || - !expectedLabelsPresent(current.ObjectMeta.Labels, svc.ObjectMeta.Labels) { + !expectedLabelsPresent(current.ObjectMeta.Labels, svc.ObjectMeta.Labels) || + // This DeepEqual is necessary to force update dispatcher services when upgrading from 0.5 to 0.6. + // Above DeepDerivative will not work because we have removed an optional field (name) from ports + !equality.Semantic.DeepEqual(svc.Spec.Ports, current.Spec.Ports) { current.Spec = svc.Spec current.ObjectMeta.Labels = addExpectedLabels(current.ObjectMeta.Labels, svc.ObjectMeta.Labels) err = client.Update(ctx, current) @@ -265,7 +267,6 @@ func UpdateChannel(ctx context.Context, client runtimeClient.Client, u *eventing // OwnerReferences on the resource so handleObject can discover the Channel resource that 'owns' it. // As well as being garbage collected when the Channel is deleted. func newK8sService(c *eventingv1alpha1.Channel, opts ...k8sServiceOption) (*corev1.Service, error) { - // TODO: Need to check if generated name truncates the channel name in case channel name is tool long // Add annotations svc := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ @@ -283,8 +284,9 @@ func newK8sService(c *eventingv1alpha1.Channel, opts ...k8sServiceOption) (*core Spec: corev1.ServiceSpec{ Ports: []corev1.ServicePort{ { - Name: PortName, - Port: PortNumber, + Name: PortName, + Protocol: corev1.ProtocolTCP, + Port: PortNumber, }, }, }, diff --git a/pkg/provisioners/inmemory/clusterchannelprovisioner/reconcile.go b/pkg/provisioners/inmemory/clusterchannelprovisioner/reconcile.go index 5794b2dbafc..678c544d46a 100644 --- a/pkg/provisioners/inmemory/clusterchannelprovisioner/reconcile.go +++ b/pkg/provisioners/inmemory/clusterchannelprovisioner/reconcile.go @@ -180,7 +180,7 @@ func (r *reconciler) reconcile(ctx context.Context, ccp *eventingv1alpha1.Cluste return nil } -// Since there are two provisioners "in-memry" 
and "in-memory-channel" but one single dispatcher service deployment, +// Since there are two provisioners "in-memory" and "in-memory-channel" but one single dispatcher service deployment, // update the label of the K8s service to always point at the same dispatcher service deployment func setDispatcherServiceSelector() util.ServiceOption { return func(svc *v1.Service) error { diff --git a/pkg/provisioners/provisioner_util.go b/pkg/provisioners/provisioner_util.go index 7003250345c..a65551fcc3f 100644 --- a/pkg/provisioners/provisioner_util.go +++ b/pkg/provisioners/provisioner_util.go @@ -78,8 +78,11 @@ func newDispatcherService(ccp *eventingv1alpha1.ClusterChannelProvisioner, opts Selector: labels, Ports: []corev1.ServicePort{ { - Name: "http", + // There is a bug in Istio where named port doesn't work when connecting using an ExternalName service + // Refer to https://github.com/istio/istio/issues/13193 for more details. + // TODO: Revert this when ISTIO fixes the issue Port: 80, + Protocol: corev1.ProtocolTCP, TargetPort: intstr.FromInt(8080), }, }, diff --git a/pkg/reconciler/v1alpha1/broker/resources/ingress.go b/pkg/reconciler/v1alpha1/broker/resources/ingress.go index f4eb40cd85c..8df5a57f841 100644 --- a/pkg/reconciler/v1alpha1/broker/resources/ingress.go +++ b/pkg/reconciler/v1alpha1/broker/resources/ingress.go @@ -58,9 +58,10 @@ func MakeIngress(args *IngressArgs) *appsv1.Deployment { Template: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Labels: ingressLabels(args.Broker), - // Annotations: map[string]string{ - // "sidecar.istio.io/inject": "true", - // }, + // TODO: Remove this annotation once all channels stop using istio virtual service + Annotations: map[string]string{ + "sidecar.istio.io/inject": "true", + }, }, Spec: corev1.PodSpec{ ServiceAccountName: args.ServiceAccountName, diff --git a/pkg/sidecar/multichannelfanout/multi_channel_fanout_handler.go b/pkg/sidecar/multichannelfanout/multi_channel_fanout_handler.go index 
a2f24cbc6d8..282a1c0985d 100644 --- a/pkg/sidecar/multichannelfanout/multi_channel_fanout_handler.go +++ b/pkg/sidecar/multichannelfanout/multi_channel_fanout_handler.go @@ -42,9 +42,9 @@ type Config struct { // ChannelConfig is the configuration for a single Channel. type ChannelConfig struct { - Namespace string `json:"namespace"` - Name string `json:"name"` - HostName string + Namespace string `json:"namespace"` + Name string `json:"name"` + HostName string `json:"hostname"` FanoutConfig fanout.Config `json:"fanoutConfig"` } diff --git a/pkg/sidecar/swappable/swappable.go b/pkg/sidecar/swappable/swappable.go index 3cff72630df..70de3edab2c 100644 --- a/pkg/sidecar/swappable/swappable.go +++ b/pkg/sidecar/swappable/swappable.go @@ -24,7 +24,6 @@ package swappable import ( "errors" - "fmt" "net/http" "sync" "sync/atomic" @@ -103,9 +102,6 @@ func (h *Handler) UpdateConfig(config *multichannelfanout.Config) error { // ServeHTTP delegates all HTTP requests to the current multichannelfanout.Handler. func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - // TODO: delete this debugging code - fmt.Sprintf("Request: %+v", r) - // Hand work off to the current multi channel fanout handler. 
h.logger.Debug("ServeHTTP request received") h.getMultiChannelFanoutHandler().ServeHTTP(w, r) From df4487f10c86dd6b35e5ab47a516aa039db8829d Mon Sep 17 00:00:00 2001 From: akashrv Date: Wed, 10 Apr 2019 06:51:24 -0700 Subject: [PATCH 05/26] fixed uts that failed due to last K8s service change --- .../controller/clusterchannelprovisioner/reconcile_test.go | 2 +- pkg/provisioners/channel_util_test.go | 5 +++-- .../inmemory/clusterchannelprovisioner/reconcile_test.go | 2 +- pkg/provisioners/provisioner_util_test.go | 2 +- 4 files changed, 6 insertions(+), 5 deletions(-) diff --git a/contrib/natss/pkg/controller/clusterchannelprovisioner/reconcile_test.go b/contrib/natss/pkg/controller/clusterchannelprovisioner/reconcile_test.go index e2f043d9231..57a70ade635 100644 --- a/contrib/natss/pkg/controller/clusterchannelprovisioner/reconcile_test.go +++ b/contrib/natss/pkg/controller/clusterchannelprovisioner/reconcile_test.go @@ -254,7 +254,7 @@ func makeK8sService() *corev1.Service { Selector: provisioners.DispatcherLabels(Name), Ports: []corev1.ServicePort{ { - Name: "http", + Protocol: corev1.ProtocolTCP, Port: 80, TargetPort: intstr.FromInt(8080), }, diff --git a/pkg/provisioners/channel_util_test.go b/pkg/provisioners/channel_util_test.go index 6aded3735d3..4f89ffb7b56 100644 --- a/pkg/provisioners/channel_util_test.go +++ b/pkg/provisioners/channel_util_test.go @@ -597,8 +597,9 @@ func makeK8sService() *corev1.Service { Spec: corev1.ServiceSpec{ Ports: []corev1.ServicePort{ { - Name: PortName, - Port: PortNumber, + Name: PortName, + Port: PortNumber, + Protocol: corev1.ProtocolTCP, }, }, }, diff --git a/pkg/provisioners/inmemory/clusterchannelprovisioner/reconcile_test.go b/pkg/provisioners/inmemory/clusterchannelprovisioner/reconcile_test.go index 036e1424235..9d8934d0c61 100644 --- a/pkg/provisioners/inmemory/clusterchannelprovisioner/reconcile_test.go +++ b/pkg/provisioners/inmemory/clusterchannelprovisioner/reconcile_test.go @@ -386,9 +386,9 @@ func 
makeK8sService() *corev1.Service { Selector: util.DispatcherLabels(inMemoryChannelName), Ports: []corev1.ServicePort{ { - Name: "http", Port: 80, TargetPort: intstr.FromInt(8080), + Protocol: corev1.ProtocolTCP, }, }, }, diff --git a/pkg/provisioners/provisioner_util_test.go b/pkg/provisioners/provisioner_util_test.go index cdf2eb724e6..fcd6a9dafe1 100644 --- a/pkg/provisioners/provisioner_util_test.go +++ b/pkg/provisioners/provisioner_util_test.go @@ -180,9 +180,9 @@ func makeDispatcherService() *corev1.Service { Selector: DispatcherLabels(clusterChannelProvisionerName), Ports: []corev1.ServicePort{ { - Name: "http", Port: 80, TargetPort: intstr.FromInt(8080), + Protocol: corev1.ProtocolTCP, }, }, }, From 23ae8b4457032e08f4f5cb5422ce5135f1b46fb2 Mon Sep 17 00:00:00 2001 From: Akash Verenkar Date: Wed, 10 Apr 2019 16:14:55 -0700 Subject: [PATCH 06/26] Removed unnecessary space from a line --- config/provisioners/in-memory-channel/in-memory-channel.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config/provisioners/in-memory-channel/in-memory-channel.yaml b/config/provisioners/in-memory-channel/in-memory-channel.yaml index 3f466ff5e41..d1b30298273 100644 --- a/config/provisioners/in-memory-channel/in-memory-channel.yaml +++ b/config/provisioners/in-memory-channel/in-memory-channel.yaml @@ -188,7 +188,7 @@ spec: role: dispatcher template: metadata: - annotations: + annotations: sidecar.istio.io/inject: "true" labels: *labels spec: From bb7ab3e84fbbee2e7b6144e5df9d199512840e04 Mon Sep 17 00:00:00 2001 From: Akash Verenkar Date: Wed, 10 Apr 2019 16:19:26 -0700 Subject: [PATCH 07/26] dding istio annotation to test POD. 
This will ve needed when running E2E tests against channels other than in-memory --- test/crd.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/test/crd.go b/test/crd.go index 139b33de079..dd29cd53534 100644 --- a/test/crd.go +++ b/test/crd.go @@ -74,6 +74,9 @@ func Configuration(name string, namespace string, imagePath string) *servingv1al // ClusterChannelProvisioner returns a ClusterChannelProvisioner for a given name func ClusterChannelProvisioner(name string) *corev1.ObjectReference { + if name == "" { + return nil + } return pkgTest.CoreV1ObjectReference("ClusterChannelProvisioner", eventsApiVersion, name) } @@ -164,8 +167,9 @@ func EventSenderPod(name string, namespace string, sink string, event CloudEvent return &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, + Name: name, + Namespace: namespace, + Annotations: map[string]string{"sidecar.istio.io/inject": "true"}, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{{ From c646dcd49ab85272d84a605abfb1dd124e24a832 Mon Sep 17 00:00:00 2001 From: Akash Verenkar Date: Thu, 11 Apr 2019 10:36:16 -0700 Subject: [PATCH 08/26] Bug fix to set clusterIp of K8s service only when it is not of type ExternalName --- pkg/provisioners/channel_util.go | 6 ++++-- pkg/provisioners/inmemory/channel/reconcile_test.go | 2 +- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/pkg/provisioners/channel_util.go b/pkg/provisioners/channel_util.go index 1fc8026dd27..3ca480a6582 100644 --- a/pkg/provisioners/channel_util.go +++ b/pkg/provisioners/channel_util.go @@ -69,7 +69,7 @@ type k8sServiceOption func(*corev1.Service) error func ExternalService(c *eventingv1alpha1.Channel) k8sServiceOption { return func(svc *corev1.Service) error { svc.Spec = corev1.ServiceSpec{ - Type: "ExternalName", + Type: corev1.ServiceTypeExternalName, ExternalName: names.ServiceHostName(channelDispatcherServiceName(c.Spec.Provisioner.Name), system.Namespace()), } return nil @@ -125,7 
+125,9 @@ func createK8sService(ctx context.Context, client runtimeClient.Client, getSvc g } // spec.clusterIP is immutable and is set on existing services. If we don't set this // to the same value, we will encounter an error while updating. - svc.Spec.ClusterIP = current.Spec.ClusterIP + if svc.Spec.Type != corev1.ServiceTypeExternalName { + svc.Spec.ClusterIP = current.Spec.ClusterIP + } if !equality.Semantic.DeepDerivative(svc.Spec, current.Spec) || !expectedLabelsPresent(current.ObjectMeta.Labels, svc.ObjectMeta.Labels) || // This DeepEqual is necessary to force update dispatcher services when upgrading from 0.5 to 0.6. diff --git a/pkg/provisioners/inmemory/channel/reconcile_test.go b/pkg/provisioners/inmemory/channel/reconcile_test.go index 211f3fc5a03..76aa9e06f95 100644 --- a/pkg/provisioners/inmemory/channel/reconcile_test.go +++ b/pkg/provisioners/inmemory/channel/reconcile_test.go @@ -482,7 +482,7 @@ func makeK8sService(pn ...string) *corev1.Service { }, Spec: corev1.ServiceSpec{ ExternalName: names.ServiceHostName(fmt.Sprintf("%s-dispatcher", getProvisionerName(pn)), system.Namespace()), - Type: "ExternalName", + Type: corev1.ServiceTypeExternalName, }, } } From 485f6b3b93c88099c431a578e1e54c5b4901db85 Mon Sep 17 00:00:00 2001 From: akashrv Date: Thu, 11 Apr 2019 10:40:08 -0700 Subject: [PATCH 09/26] WIP kafka channel --- cmd/fanoutsidecar/main.go | 23 +-- contrib/kafka/cmd/dispatcher/main.go | 68 +++++---- contrib/kafka/config/kafka.yaml | 9 +- .../kafka/pkg/controller/channel/provider.go | 24 +-- .../kafka/pkg/controller/channel/reconcile.go | 140 +----------------- .../pkg/controller/channel/reconcile_test.go | 70 ++++----- pkg/provisioners/channel_util.go | 3 +- .../v1alpha1/broker/resources/ingress.go | 4 - pkg/sidecar/multichannelfanout/config.go | 67 +++++++++ .../multi_channel_fanout_handler.go | 14 -- 10 files changed, 156 insertions(+), 266 deletions(-) create mode 100644 pkg/sidecar/multichannelfanout/config.go diff --git 
a/cmd/fanoutsidecar/main.go b/cmd/fanoutsidecar/main.go index 6392dd91e57..ed2fe9a2d6d 100644 --- a/cmd/fanoutsidecar/main.go +++ b/cmd/fanoutsidecar/main.go @@ -30,7 +30,6 @@ import ( "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" "github.com/knative/eventing/pkg/channelwatcher" "github.com/knative/eventing/pkg/logging" - "github.com/knative/eventing/pkg/sidecar/fanout" "github.com/knative/eventing/pkg/sidecar/multichannelfanout" "github.com/knative/eventing/pkg/sidecar/swappable" "go.uber.org/zap" @@ -145,7 +144,7 @@ func updateChannelConfig(updateConfig swappable.UpdateConfig) channelwatcher.Wat logging.FromContext(ctx).Info("Unable to list channels", zap.Error(err)) return err } - config := multiChannelFanoutConfig(channels) + config := multichannelfanout.NewConfigFromChannels(channels) return updateConfig(config) } } @@ -175,26 +174,6 @@ func shouldWatch(ch *v1alpha1.Channel) bool { return false } -func multiChannelFanoutConfig(channels []v1alpha1.Channel) *multichannelfanout.Config { - cc := make([]multichannelfanout.ChannelConfig, 0) - for _, c := range channels { - channelConfig := multichannelfanout.ChannelConfig{ - Namespace: c.Namespace, - Name: c.Name, - HostName: c.Status.Address.Hostname, - } - if c.Spec.Subscribable != nil { - channelConfig.FanoutConfig = fanout.Config{ - Subscriptions: c.Spec.Subscribable.Subscribers, - } - } - cc = append(cc, channelConfig) - } - return &multichannelfanout.Config{ - ChannelConfigs: cc, - } -} - // runnableServer is a small wrapper around http.Server so that it matches the manager.Runnable // interface. type runnableServer struct { diff --git a/contrib/kafka/cmd/dispatcher/main.go b/contrib/kafka/cmd/dispatcher/main.go index 9ef18689623..6748e52c088 100644 --- a/contrib/kafka/cmd/dispatcher/main.go +++ b/contrib/kafka/cmd/dispatcher/main.go @@ -17,33 +17,27 @@ limitations under the License. 
package main import ( + "context" "flag" - "fmt" "log" - "os" + "github.com/knative/eventing/contrib/kafka/pkg/controller" provisionerController "github.com/knative/eventing/contrib/kafka/pkg/controller" "github.com/knative/eventing/contrib/kafka/pkg/dispatcher" - "github.com/knative/eventing/pkg/sidecar/configmap/watcher" - "github.com/knative/eventing/pkg/utils" + "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" + "github.com/knative/eventing/pkg/channelwatcher" + "github.com/knative/eventing/pkg/logging" + "github.com/knative/eventing/pkg/sidecar/multichannelfanout" + "github.com/knative/eventing/pkg/sidecar/swappable" "github.com/knative/pkg/signals" - "github.com/knative/pkg/system" "go.uber.org/zap" - "k8s.io/client-go/kubernetes" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/config" "sigs.k8s.io/controller-runtime/pkg/manager" ) func main() { - configMapName := os.Getenv("DISPATCHER_CONFIGMAP_NAME") - if configMapName == "" { - configMapName = provisionerController.DispatcherConfigMapName - } - configMapNamespace := os.Getenv("DISPATCHER_CONFIGMAP_NAMESPACE") - if configMapNamespace == "" { - configMapNamespace = system.Namespace() - } - flag.Parse() logger, err := zap.NewProduction() if err != nil { @@ -68,17 +62,10 @@ func main() { logger.Fatal("Unable to add kafkaDispatcher", zap.Error(err)) } - kc, err := kubernetes.NewForConfig(mgr.GetConfig()) - if err != nil { - logger.Fatal("unable to create kubernetes client.", zap.Error(err)) - } - - cmw, err := watcher.NewWatcher(logger, kc, configMapNamespace, configMapName, kafkaDispatcher.UpdateConfig) + v1alpha1.AddToScheme(mgr.GetScheme()) + channelwatcher.New(mgr, logger, updateChannelConfig(kafkaDispatcher.UpdateConfig)) if err != nil { - logger.Fatal("unable to create configMap watcher", zap.String("configMap", fmt.Sprintf("%s/%s", configMapNamespace, configMapName))) - } - if err = mgr.Add(utils.NewBlockingStart(logger, 
cmw)); err != nil { - logger.Fatal("Unable to add the configMap watcher to the manager", zap.Error(err)) + logger.Fatal("Unable to create channel watcher.", zap.Error(err)) } // set up signals so we handle the first shutdown signal gracefully @@ -89,3 +76,34 @@ func main() { } logger.Info("Exiting...") } +func updateChannelConfig(updateConfig swappable.UpdateConfig) channelwatcher.WatchHandlerFunc { + return func(ctx context.Context, c client.Client, chanNamespacedName types.NamespacedName) error { + channels, err := listAllChannels(ctx, c) + if err != nil { + logging.FromContext(ctx).Info("Unable to list channels", zap.Error(err)) + return err + } + config := multichannelfanout.NewConfigFromChannels(channels) + return updateConfig(config) + } +} + +func listAllChannels(ctx context.Context, c client.Client) ([]v1alpha1.Channel, error) { + channels := make([]v1alpha1.Channel, 0) + cl := &v1alpha1.ChannelList{} + if err := c.List(ctx, &client.ListOptions{}, cl); err != nil { + return nil, err + } + for _, c := range cl.Items { + if c.Status.IsReady() && shouldWatch(&c) { + channels = append(channels, c) + } + } + return channels, nil +} + +func shouldWatch(ch *v1alpha1.Channel) bool { + return ch.Spec.Provisioner != nil && + ch.Spec.Provisioner.Namespace == "" && + ch.Spec.Provisioner.Name == controller.Name +} diff --git a/contrib/kafka/config/kafka.yaml b/contrib/kafka/config/kafka.yaml index dc28a8636da..d0b25649e00 100644 --- a/contrib/kafka/config/kafka.yaml +++ b/contrib/kafka/config/kafka.yaml @@ -108,7 +108,7 @@ metadata: namespace: knative-eventing data: # Broker URL's for the provisioner. Replace this with the URL's for your kafka cluster. - bootstrap_servers: kafkabroker.kafka:9092 + bootstrap_servers: my-cluster-kafka-bootstrap.kafka:9092 # Consumer mode to dispatch events from different partitions in parallel. # By default(multiplex), partitions are multiplexed with a single go channel. 
@@ -211,13 +211,6 @@ spec: containers: - name: dispatcher image: github.com/knative/eventing/contrib/kafka/cmd/dispatcher - env: - - name: DISPATCHER_CONFIGMAP_NAME - value: kafka-channel-dispatcher - - name: DISPATCHER_CONFIGMAP_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace volumeMounts: - name: kafka-channel-controller-config mountPath: /etc/config-provisioner diff --git a/contrib/kafka/pkg/controller/channel/provider.go b/contrib/kafka/pkg/controller/channel/provider.go index 7c9d413d246..73eab2e8d22 100644 --- a/contrib/kafka/pkg/controller/channel/provider.go +++ b/contrib/kafka/pkg/controller/channel/provider.go @@ -18,7 +18,6 @@ package channel import ( "github.com/Shopify/sarama" - istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3" "go.uber.org/zap" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" @@ -49,11 +48,10 @@ var ( ) type reconciler struct { - client client.Client - recorder record.EventRecorder - logger *zap.Logger - config *common.KafkaProvisionerConfig - configMapKey client.ObjectKey + client client.Client + recorder record.EventRecorder + logger *zap.Logger + config *common.KafkaProvisionerConfig // Using a shared kafkaClusterAdmin does not work currently because of an issue with // Shopify/sarama, see https://github.com/Shopify/sarama/issues/1162. kafkaClusterAdmin sarama.ClusterAdmin @@ -67,10 +65,9 @@ func ProvideController(mgr manager.Manager, config *common.KafkaProvisionerConfi // Setup a new controller to Reconcile Channel. 
c, err := controller.New(controllerAgentName, mgr, controller.Options{ Reconciler: &reconciler{ - recorder: mgr.GetRecorder(controllerAgentName), - logger: logger, - config: config, - configMapKey: defaultConfigMapKey, + recorder: mgr.GetRecorder(controllerAgentName), + logger: logger, + config: config, }, }) if err != nil { @@ -89,13 +86,6 @@ func ProvideController(mgr manager.Manager, config *common.KafkaProvisionerConfi return nil, err } - // Watch the VirtualServices that are owned by Channels. - err = c.Watch(&source.Kind{Type: &istiov1alpha3.VirtualService{}}, &handler.EnqueueRequestForOwner{OwnerType: &eventingv1alpha1.Channel{}, IsController: true}) - if err != nil { - logger.Error("unable to watch VirtualServices.", zap.Error(err)) - return nil, err - } - return c, nil } diff --git a/contrib/kafka/pkg/controller/channel/reconcile.go b/contrib/kafka/pkg/controller/channel/reconcile.go index cf64955bc43..34a7e1c9b71 100644 --- a/contrib/kafka/pkg/controller/channel/reconcile.go +++ b/contrib/kafka/pkg/controller/channel/reconcile.go @@ -23,10 +23,8 @@ import ( "github.com/Shopify/sarama" "go.uber.org/zap" - corev1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -35,10 +33,6 @@ import ( util "github.com/knative/eventing/pkg/provisioners" topicUtils "github.com/knative/eventing/pkg/provisioners/utils" eventingNames "github.com/knative/eventing/pkg/reconciler/names" - "github.com/knative/eventing/pkg/sidecar/configmap" - "github.com/knative/eventing/pkg/sidecar/fanout" - "github.com/knative/eventing/pkg/sidecar/multichannelfanout" - "k8s.io/apimachinery/pkg/api/equality" ) const ( @@ -97,30 +91,28 @@ func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, err return reconcile.Result{}, nil } - newChannel := channel.DeepCopy() - - 
newChannel.Status.InitializeConditions() + channel.Status.InitializeConditions() var requeue = false if clusterChannelProvisioner.Status.IsReady() { // Reconcile this copy of the Channel and then write back any status // updates regardless of whether the reconcile error out. - requeue, err = r.reconcile(ctx, newChannel) + requeue, err = r.reconcile(ctx, channel) } else { - newChannel.Status.MarkNotProvisioned("NotProvisioned", "ClusterChannelProvisioner %s is not ready", clusterChannelProvisioner.Name) + channel.Status.MarkNotProvisioned("NotProvisioned", "ClusterChannelProvisioner %s is not ready", clusterChannelProvisioner.Name) err = fmt.Errorf("ClusterChannelProvisioner %s is not ready", clusterChannelProvisioner.Name) } if err != nil { r.logger.Error("Dispatcher reconciliation failed", zap.Error(err)) - r.recorder.Eventf(newChannel, v1.EventTypeWarning, dispatcherReconcileFailed, "Dispatcher reconciliation failed: %v", err) + r.recorder.Eventf(channel, v1.EventTypeWarning, dispatcherReconcileFailed, "Dispatcher reconciliation failed: %v", err) } else { r.logger.Debug("Channel reconciled") } - if updateChannelErr := util.UpdateChannel(ctx, r.client, newChannel); updateChannelErr != nil { + if updateChannelErr := util.UpdateChannel(ctx, r.client, channel); updateChannelErr != nil { r.logger.Info("failed to update channel status", zap.Error(updateChannelErr)) - r.recorder.Eventf(newChannel, v1.EventTypeWarning, dispatcherUpdateStatusFailed, "Failed to update Channel's dispatcher status: %v", err) + r.recorder.Eventf(channel, v1.EventTypeWarning, dispatcherUpdateStatusFailed, "Failed to update Channel's dispatcher status: %v", err) return reconcile.Result{}, updateChannelErr } @@ -134,13 +126,6 @@ func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, err // boolean indicates if this Channel should be immediately requeued for another reconcile loop. The // returned error indicates an error during reconciliation. 
func (r *reconciler) reconcile(ctx context.Context, channel *eventingv1alpha1.Channel) (bool, error) { - - // We always need to sync the Channel config, so do it first. - if err := r.syncChannelConfig(ctx); err != nil { - r.logger.Info("error updating syncing the Channel config", zap.Error(err)) - return false, err - } - // We don't currently initialize r.kafkaClusterAdmin, hence we end up creating the cluster admin client every time. // This is because of an issue with Shopify/sarama. See https://github.com/Shopify/sarama/issues/1162. // Once the issue is fixed we should use a shared cluster admin client. Also, r.kafkaClusterAdmin is currently @@ -177,19 +162,12 @@ func (r *reconciler) reconcile(ctx context.Context, channel *eventingv1alpha1.Ch return false, err } - svc, err := util.CreateK8sService(ctx, r.client, channel) + svc, err := util.CreateK8sService(ctx, r.client, channel, util.ExternalService(channel)) if err != nil { r.logger.Info("error creating the Channel's K8s Service", zap.Error(err)) return false, err } channel.Status.SetAddress(eventingNames.ServiceHostName(svc.Name, svc.Namespace)) - - _, err = util.CreateVirtualService(ctx, r.client, channel, svc) - if err != nil { - r.logger.Info("error creating the Virtual Service for the Channel", zap.Error(err)) - return false, err - } - channel.Status.MarkProvisioned() // close the connection @@ -268,110 +246,6 @@ func (r *reconciler) getClusterChannelProvisioner() (*eventingv1alpha1.ClusterCh return clusterChannelProvisioner, nil } -func (r *reconciler) syncChannelConfig(ctx context.Context) error { - channels, err := r.listAllChannels(ctx) - if err != nil { - r.logger.Info("Unable to list channels", zap.Error(err)) - return err - } - config := multiChannelFanoutConfig(channels) - return r.writeConfigMap(ctx, config) -} - -func (r *reconciler) writeConfigMap(ctx context.Context, config *multichannelfanout.Config) error { - logger := r.logger.With(zap.Any("configMap", r.configMapKey)) - - updated, err := 
configmap.SerializeConfig(*config) - if err != nil { - r.logger.Error("Unable to serialize config", zap.Error(err), zap.Any("config", config)) - return err - } - - cm := &corev1.ConfigMap{} - err = r.client.Get(ctx, r.configMapKey, cm) - if errors.IsNotFound(err) { - cm = r.createNewConfigMap(updated) - err = r.client.Create(ctx, cm) - if err != nil { - logger.Info("Unable to create ConfigMap", zap.Error(err)) - return err - } - } - if err != nil { - logger.Info("Unable to get ConfigMap", zap.Error(err)) - return err - } - - if equality.Semantic.DeepEqual(cm.Data, updated) { - // Nothing to update. - return nil - } - - cm.Data = updated - return r.client.Update(ctx, cm) -} - -func (r *reconciler) createNewConfigMap(data map[string]string) *corev1.ConfigMap { - return &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: r.configMapKey.Namespace, - Name: r.configMapKey.Name, - }, - Data: data, - } -} - -func multiChannelFanoutConfig(channels []eventingv1alpha1.Channel) *multichannelfanout.Config { - cc := make([]multichannelfanout.ChannelConfig, 0) - for _, c := range channels { - channelConfig := multichannelfanout.ChannelConfig{ - Namespace: c.Namespace, - Name: c.Name, - } - if c.Spec.Subscribable != nil { - channelConfig.FanoutConfig = fanout.Config{ - Subscriptions: c.Spec.Subscribable.Subscribers, - } - } - cc = append(cc, channelConfig) - } - return &multichannelfanout.Config{ - ChannelConfigs: cc, - } -} - -func (r *reconciler) listAllChannels(ctx context.Context) ([]eventingv1alpha1.Channel, error) { - clusterChannelProvisioner, err := r.getClusterChannelProvisioner() - if err != nil { - return nil, err - } - - channels := make([]eventingv1alpha1.Channel, 0) - - opts := &client.ListOptions{ - // Set Raw because if we need to get more than one page, then we will put the continue token - // into opts.Raw.Continue. 
- Raw: &metav1.ListOptions{}, - } - for { - cl := &eventingv1alpha1.ChannelList{} - if err = r.client.List(ctx, opts, cl); err != nil { - return nil, err - } - - for _, c := range cl.Items { - if r.shouldReconcile(&c, clusterChannelProvisioner) { - channels = append(channels, c) - } - } - if cl.Continue != "" { - opts.Raw.Continue = cl.Continue - } else { - return channels, nil - } - } -} - func createKafkaAdminClient(config *controller.KafkaProvisionerConfig) (sarama.ClusterAdmin, error) { saramaConf := sarama.NewConfig() saramaConf.Version = sarama.V1_1_0_0 diff --git a/contrib/kafka/pkg/controller/channel/reconcile_test.go b/contrib/kafka/pkg/controller/channel/reconcile_test.go index 67253e20ce6..bff18603f02 100644 --- a/contrib/kafka/pkg/controller/channel/reconcile_test.go +++ b/contrib/kafka/pkg/controller/channel/reconcile_test.go @@ -30,10 +30,12 @@ import ( eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" "github.com/knative/eventing/pkg/provisioners" util "github.com/knative/eventing/pkg/provisioners" + "github.com/knative/eventing/pkg/reconciler/names" controllertesting "github.com/knative/eventing/pkg/reconciler/testing" "github.com/knative/eventing/pkg/utils" duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1" istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3" + "github.com/knative/pkg/system" _ "github.com/knative/pkg/system/testing" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -142,13 +144,13 @@ var testCases = []controllertesting.TestCase{ InitialState: []runtime.Object{ getNewClusterChannelProvisioner(clusterChannelProvisionerName, true), getNewChannel(channelName, clusterChannelProvisionerName), - makeVirtualService(), }, WantResult: reconcile.Result{ Requeue: true, }, WantPresent: []runtime.Object{ getNewChannelWithStatusAndFinalizer(channelName, clusterChannelProvisionerName), + makeK8sService(), }, }, { @@ -156,7 +158,6 @@ var testCases = []controllertesting.TestCase{ 
InitialState: []runtime.Object{ getNewClusterChannelProvisioner(clusterChannelProvisionerName, true), getNewChannelWithStatusAndFinalizer(channelName, clusterChannelProvisionerName), - makeVirtualService(), }, WantPresent: []runtime.Object{ getNewChannelProvisionedStatus(channelName, clusterChannelProvisionerName), @@ -523,18 +524,32 @@ func getNewClusterChannelProvisioner(name string, isReady bool) *eventingv1alpha return clusterChannelProvisioner } -func makeVirtualService() *istiov1alpha3.VirtualService { - return &istiov1alpha3.VirtualService{ +func om(namespace, name string) metav1.ObjectMeta { + return metav1.ObjectMeta{ + Namespace: namespace, + Name: name, + SelfLink: fmt.Sprintf("/apis/eventing/v1alpha1/namespaces/%s/object/%s", namespace, name), + } +} + +func getControllerConfig() *controller.KafkaProvisionerConfig { + return &controller.KafkaProvisionerConfig{ + Brokers: []string{"test-broker"}, + } +} + +func makeK8sService() *corev1.Service { + return &corev1.Service{ TypeMeta: metav1.TypeMeta{ - APIVersion: istiov1alpha3.SchemeGroupVersion.String(), - Kind: "VirtualService", + APIVersion: "v1", + Kind: "Service", }, ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("%s-channel", testNS), - Namespace: testNS, + GenerateName: fmt.Sprintf("%s-channel-", channelName), + Namespace: testNS, Labels: map[string]string{ - "channel": channelName, - "provisioner": clusterChannelProvisionerName, + util.EventingChannelLabel: channelName, + util.OldEventingChannelLabel: channelName, }, OwnerReferences: []metav1.OwnerReference{ { @@ -547,38 +562,9 @@ func makeVirtualService() *istiov1alpha3.VirtualService { }, }, }, - Spec: istiov1alpha3.VirtualServiceSpec{ - Hosts: []string{ - serviceAddress, - fmt.Sprintf("%s.%s.channels.%s", channelName, testNS, utils.GetClusterDomainName()), - }, - HTTP: []istiov1alpha3.HTTPRoute{{ - Rewrite: &istiov1alpha3.HTTPRewrite{ - Authority: fmt.Sprintf("%s.%s.channels.%s", channelName, testNS, utils.GetClusterDomainName()), - }, - 
Route: []istiov1alpha3.DestinationWeight{{ - Destination: istiov1alpha3.Destination{ - Host: "kafka-provisioner.knative-testing.svc." + utils.GetClusterDomainName(), - Port: istiov1alpha3.PortSelector{ - Number: util.PortNumber, - }, - }}, - }}, - }, + Spec: corev1.ServiceSpec{ + ExternalName: names.ServiceHostName(fmt.Sprintf("%s-dispatcher", clusterChannelProvisionerName), system.Namespace()), + Type: "ExternalName", }, } } - -func om(namespace, name string) metav1.ObjectMeta { - return metav1.ObjectMeta{ - Namespace: namespace, - Name: name, - SelfLink: fmt.Sprintf("/apis/eventing/v1alpha1/namespaces/%s/object/%s", namespace, name), - } -} - -func getControllerConfig() *controller.KafkaProvisionerConfig { - return &controller.KafkaProvisionerConfig{ - Brokers: []string{"test-broker"}, - } -} diff --git a/pkg/provisioners/channel_util.go b/pkg/provisioners/channel_util.go index 1fc8026dd27..fd9caf06d52 100644 --- a/pkg/provisioners/channel_util.go +++ b/pkg/provisioners/channel_util.go @@ -69,6 +69,7 @@ type k8sServiceOption func(*corev1.Service) error func ExternalService(c *eventingv1alpha1.Channel) k8sServiceOption { return func(svc *corev1.Service) error { svc.Spec = corev1.ServiceSpec{ + ClusterIP: "", Type: "ExternalName", ExternalName: names.ServiceHostName(channelDispatcherServiceName(c.Spec.Provisioner.Name), system.Namespace()), } @@ -125,7 +126,7 @@ func createK8sService(ctx context.Context, client runtimeClient.Client, getSvc g } // spec.clusterIP is immutable and is set on existing services. If we don't set this // to the same value, we will encounter an error while updating. - svc.Spec.ClusterIP = current.Spec.ClusterIP + // svc.Spec.ClusterIP = current.Spec.ClusterIP if !equality.Semantic.DeepDerivative(svc.Spec, current.Spec) || !expectedLabelsPresent(current.ObjectMeta.Labels, svc.ObjectMeta.Labels) || // This DeepEqual is necessary to force update dispatcher services when upgrading from 0.5 to 0.6. 
diff --git a/pkg/reconciler/v1alpha1/broker/resources/ingress.go b/pkg/reconciler/v1alpha1/broker/resources/ingress.go index 8df5a57f841..39084b4b6d5 100644 --- a/pkg/reconciler/v1alpha1/broker/resources/ingress.go +++ b/pkg/reconciler/v1alpha1/broker/resources/ingress.go @@ -58,10 +58,6 @@ func MakeIngress(args *IngressArgs) *appsv1.Deployment { Template: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Labels: ingressLabels(args.Broker), - // TODO: Remove this annotation once all channels stop using istio virtual service - Annotations: map[string]string{ - "sidecar.istio.io/inject": "true", - }, }, Spec: corev1.PodSpec{ ServiceAccountName: args.ServiceAccountName, diff --git a/pkg/sidecar/multichannelfanout/config.go b/pkg/sidecar/multichannelfanout/config.go new file mode 100644 index 00000000000..84f29b85089 --- /dev/null +++ b/pkg/sidecar/multichannelfanout/config.go @@ -0,0 +1,67 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package multichannelfanout provides an http.Handler that takes in one request to a Knative +// Channel and fans it out to N other requests. Logically, it represents multiple Knative Channels. +// It is made up of a map, map[channel]fanout.Handler and each incoming request is inspected to +// determine which Channel it is on. This Handler delegates the HTTP handling to the fanout.Handler +// corresponding to the incoming request's Channel. +// It is often used in conjunction with a swappable.Handler. 
The swappable.Handler delegates all its +// requests to the multichannelfanout.Handler. When a new configuration is available, a new +// multichannelfanout.Handler is created and swapped in for all subsequent requests. The old +// multichannelfanout.Handler is discarded. + +package multichannelfanout + +import ( + "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" + "github.com/knative/eventing/pkg/sidecar/fanout" +) + +// Config for a multichannelfanout.Handler. +type Config struct { + // The configuration of each channel in this handler. + ChannelConfigs []ChannelConfig `json:"channelConfigs"` +} + +// ChannelConfig is the configuration for a single Channel. +type ChannelConfig struct { + Namespace string `json:"namespace"` + Name string `json:"name"` + HostName string `json:"hostname"` + FanoutConfig fanout.Config `json:"fanoutConfig"` +} + +// NewConfigFromChannels creates a new Config from the list of channels +func NewConfigFromChannels(channels []v1alpha1.Channel) *Config { + cc := make([]ChannelConfig, 0) + for _, c := range channels { + channelConfig := ChannelConfig{ + Namespace: c.Namespace, + Name: c.Name, + HostName: c.Status.Address.Hostname, + } + if c.Spec.Subscribable != nil { + channelConfig.FanoutConfig = fanout.Config{ + Subscriptions: c.Spec.Subscribable.Subscribers, + } + } + cc = append(cc, channelConfig) + } + return &Config{ + ChannelConfigs: cc, + } +} diff --git a/pkg/sidecar/multichannelfanout/multi_channel_fanout_handler.go b/pkg/sidecar/multichannelfanout/multi_channel_fanout_handler.go index 282a1c0985d..e4a7e1c9193 100644 --- a/pkg/sidecar/multichannelfanout/multi_channel_fanout_handler.go +++ b/pkg/sidecar/multichannelfanout/multi_channel_fanout_handler.go @@ -34,20 +34,6 @@ import ( "go.uber.org/zap" ) -// Config for a multichannelfanout.Handler. -type Config struct { - // The configuration of each channel in this handler. 
- ChannelConfigs []ChannelConfig `json:"channelConfigs"` -} - -// ChannelConfig is the configuration for a single Channel. -type ChannelConfig struct { - Namespace string `json:"namespace"` - Name string `json:"name"` - HostName string `json:"hostname"` - FanoutConfig fanout.Config `json:"fanoutConfig"` -} - // makeChannelKeyFromConfig creates the channel key for a given channelConfig. It is a helper around // MakeChannelKey. func makeChannelKeyFromConfig(config ChannelConfig) string { From 37bae81f1d6fc760d94ea100a7e6980d3bf48075 Mon Sep 17 00:00:00 2001 From: Akash Verenkar Date: Fri, 12 Apr 2019 10:01:25 -0700 Subject: [PATCH 10/26] WIP kafka - UTs and E2E pass More UTs needded --- cmd/fanoutsidecar/main.go | 4 +- contrib/gcppubsub/pkg/dispatcher/cmd/main.go | 5 +- .../pkg/dispatcher/receiver/receiver.go | 10 +- .../pkg/dispatcher/receiver/receiver_test.go | 11 +- contrib/kafka/cmd/dispatcher/main.go | 1 - contrib/kafka/config/kafka.yaml | 7 ++ .../pkg/controller/channel/reconcile_test.go | 9 +- contrib/kafka/pkg/dispatcher/dispatcher.go | 57 +++++++++- .../kafka/pkg/dispatcher/dispatcher_test.go | 102 +++++++++++++++--- .../pkg/dispatcher/dispatcher/dispatcher.go | 6 +- pkg/channelwatcher/channel_watcher.go | 4 +- pkg/provisioners/channel_util.go | 3 +- .../inmemory/channel/reconcile.go | 18 +--- pkg/provisioners/inmemory/controller/main.go | 3 +- pkg/provisioners/message_receiver.go | 39 +++++-- pkg/provisioners/message_receiver_test.go | 5 +- pkg/provisioners/provisioner_util.go | 5 +- pkg/sidecar/fanout/fanout_handler.go | 11 +- pkg/sidecar/fanout/fanout_handler_test.go | 13 ++- .../multi_channel_fanout_handler.go | 6 +- 20 files changed, 251 insertions(+), 68 deletions(-) diff --git a/cmd/fanoutsidecar/main.go b/cmd/fanoutsidecar/main.go index ed2fe9a2d6d..f5a803296a3 100644 --- a/cmd/fanoutsidecar/main.go +++ b/cmd/fanoutsidecar/main.go @@ -131,7 +131,9 @@ func setupChannelWatcher(logger *zap.Logger, configUpdated swappable.UpdateConfi logger.Error("Error 
creating new maanger.", zap.Error(err))
 		return nil, err
 	}
-	v1alpha1.AddToScheme(mgr.GetScheme())
+	if err := v1alpha1.AddToScheme(mgr.GetScheme()); err != nil {
+		logger.Error("Error adding eventing scheme to manager.", zap.Error(err))
+	}
 	channelwatcher.New(mgr, logger, updateChannelConfig(configUpdated))
 
 	return mgr, nil
diff --git a/contrib/gcppubsub/pkg/dispatcher/cmd/main.go b/contrib/gcppubsub/pkg/dispatcher/cmd/main.go
index a2a82a4e638..078e08bf2d4 100644
--- a/contrib/gcppubsub/pkg/dispatcher/cmd/main.go
+++ b/contrib/gcppubsub/pkg/dispatcher/cmd/main.go
@@ -61,7 +61,10 @@ func main() {
 	// PubSub) and the dispatcher (takes messages in PubSub and sends them in cluster) in this
 	// binary.
 
-	_, runnables := receiver.New(logger.Desugar(), mgr.GetClient(), util.GcpPubSubClientCreator)
+	_, runnables, err := receiver.New(logger.Desugar(), mgr.GetClient(), util.GcpPubSubClientCreator)
+	if err != nil {
+		logger.Fatal("Unable to create new receiver and runnable", zap.Error(err))
+	}
 	for _, runnable := range runnables {
 		err = mgr.Add(runnable)
 		if err != nil {
diff --git a/contrib/gcppubsub/pkg/dispatcher/receiver/receiver.go b/contrib/gcppubsub/pkg/dispatcher/receiver/receiver.go
index 702ee7fd5f4..665bb80dda5 100644
--- a/contrib/gcppubsub/pkg/dispatcher/receiver/receiver.go
+++ b/contrib/gcppubsub/pkg/dispatcher/receiver/receiver.go
@@ -44,7 +44,7 @@ type Receiver struct {
 
 // New creates a new Receiver and its associated MessageReceiver. The caller is responsible for
 // Start()ing the returned MessageReceiver.
-func New(logger *zap.Logger, client client.Client, pubSubClientCreator util.PubSubClientCreator) (*Receiver, []manager.Runnable) { +func New(logger *zap.Logger, client client.Client, pubSubClientCreator util.PubSubClientCreator) (*Receiver, []manager.Runnable, error) { r := &Receiver{ logger: logger, client: client, @@ -52,10 +52,14 @@ func New(logger *zap.Logger, client client.Client, pubSubClientCreator util.PubS pubSubClientCreator: pubSubClientCreator, cache: cache.NewTTL(), } - return r, []manager.Runnable{r.newMessageReceiver(), r.cache} + receiver, err := r.newMessageReceiver() + if err != nil { + return nil, nil, err + } + return r, []manager.Runnable{receiver, r.cache}, nil } -func (r *Receiver) newMessageReceiver() *provisioners.MessageReceiver { +func (r *Receiver) newMessageReceiver() (*provisioners.MessageReceiver, error) { return provisioners.NewMessageReceiver(r.sendEventToTopic, r.logger.Sugar()) } diff --git a/contrib/gcppubsub/pkg/dispatcher/receiver/receiver_test.go b/contrib/gcppubsub/pkg/dispatcher/receiver/receiver_test.go index 6d9b2353b30..d6d69db23b6 100644 --- a/contrib/gcppubsub/pkg/dispatcher/receiver/receiver_test.go +++ b/contrib/gcppubsub/pkg/dispatcher/receiver/receiver_test.go @@ -129,14 +129,21 @@ func TestReceiver(t *testing.T) { } for n, tc := range testCases { t.Run(n, func(t *testing.T) { - mr, _ := New( + mr, _, err := New( zap.NewNop(), fake.NewFakeClient(tc.initialState...), fakepubsub.Creator(tc.pubSubData)) + if err != nil { + t.Errorf("Error when creating a New receiver. Error:%s", err) + } resp := httptest.NewRecorder() req := httptest.NewRequest("POST", "/", strings.NewReader(validMessage)) req.Host = "test-channel.test-namespace.channels." + utils.GetClusterDomainName() - mr.newMessageReceiver().HandleRequest(resp, req) + receiver, err := mr.newMessageReceiver() + if err != nil { + t.Errorf("Error when creating a new message receiver. 
Error:%s", err) + } + receiver.HandleRequest(resp, req) if tc.expectedErr { if resp.Result().StatusCode >= 200 && resp.Result().StatusCode < 300 { t.Errorf("Expected an error. Actual: %v", resp.Result()) diff --git a/contrib/kafka/cmd/dispatcher/main.go b/contrib/kafka/cmd/dispatcher/main.go index 6748e52c088..9fe7cfabd52 100644 --- a/contrib/kafka/cmd/dispatcher/main.go +++ b/contrib/kafka/cmd/dispatcher/main.go @@ -43,7 +43,6 @@ func main() { if err != nil { log.Fatalf("unable to create logger: %v", err) } - provisionerConfig, err := provisionerController.GetProvisionerConfig("/etc/config-provisioner") if err != nil { logger.Fatal("unable to load provisioner config", zap.Error(err)) diff --git a/contrib/kafka/config/kafka.yaml b/contrib/kafka/config/kafka.yaml index d0b25649e00..82298b56519 100644 --- a/contrib/kafka/config/kafka.yaml +++ b/contrib/kafka/config/kafka.yaml @@ -170,6 +170,13 @@ rules: - get - list - watch + - apiGroups: + - eventing.knative.dev + resources: + - channels + verbs: + - list + - watch --- diff --git a/contrib/kafka/pkg/controller/channel/reconcile_test.go b/contrib/kafka/pkg/controller/channel/reconcile_test.go index bff18603f02..33fad32efb3 100644 --- a/contrib/kafka/pkg/controller/channel/reconcile_test.go +++ b/contrib/kafka/pkg/controller/channel/reconcile_test.go @@ -150,7 +150,6 @@ var testCases = []controllertesting.TestCase{ }, WantPresent: []runtime.Object{ getNewChannelWithStatusAndFinalizer(channelName, clusterChannelProvisionerName), - makeK8sService(), }, }, { @@ -161,6 +160,7 @@ var testCases = []controllertesting.TestCase{ }, WantPresent: []runtime.Object{ getNewChannelProvisionedStatus(channelName, clusterChannelProvisionerName), + makeK8sService(), }, }, { @@ -529,6 +529,7 @@ func om(namespace, name string) metav1.ObjectMeta { Namespace: namespace, Name: name, SelfLink: fmt.Sprintf("/apis/eventing/v1alpha1/namespaces/%s/object/%s", namespace, name), + UID: testUID, } } @@ -548,8 +549,10 @@ func makeK8sService() 
*corev1.Service { GenerateName: fmt.Sprintf("%s-channel-", channelName), Namespace: testNS, Labels: map[string]string{ - util.EventingChannelLabel: channelName, - util.OldEventingChannelLabel: channelName, + util.EventingChannelLabel: channelName, + util.OldEventingChannelLabel: channelName, + util.EventingProvisionerLabel: clusterChannelProvisionerName, + util.OldEventingProvisionerLabel: clusterChannelProvisionerName, }, OwnerReferences: []metav1.OwnerReference{ { diff --git a/contrib/kafka/pkg/dispatcher/dispatcher.go b/contrib/kafka/pkg/dispatcher/dispatcher.go index adad489be49..bfe4790d062 100644 --- a/contrib/kafka/pkg/dispatcher/dispatcher.go +++ b/contrib/kafka/pkg/dispatcher/dispatcher.go @@ -34,8 +34,9 @@ import ( ) type KafkaDispatcher struct { - config atomic.Value - updateLock sync.Mutex + config atomic.Value + hostToChannelMap atomic.Value + updateLock sync.Mutex receiver *provisioners.MessageReceiver dispatcher *provisioners.MessageDispatcher @@ -131,12 +132,36 @@ func (d *KafkaDispatcher) UpdateConfig(config *multichannelfanout.Config) error } } + hcMap, err := createHostToChannelMap(config) + if err != nil { + return err + } + d.setHostToChannelMap(hcMap) + // Update the config so that it can be used for comparison during next sync d.setConfig(config) + } return nil } +func createHostToChannelMap(config *multichannelfanout.Config) (map[string]provisioners.ChannelReference, error) { + hcMap := make(map[string]provisioners.ChannelReference) + for _, cConfig := range config.ChannelConfigs { + if cr, ok := hcMap[cConfig.HostName]; ok { + return nil, fmt.Errorf( + "Duplicate hostName found. HostName:%s, channel:%s.%s, channel:%s.%s", + cConfig.HostName, + cConfig.Namespace, + cConfig.Name, + cr.Namespace, + cr.Name) + } + hcMap[cConfig.HostName] = provisioners.ChannelReference{Name: cConfig.Name, Namespace: cConfig.Namespace} + } + return hcMap, nil +} + // Start starts the kafka dispatcher's message processing. 
func (d *KafkaDispatcher) Start(stopCh <-chan struct{}) error { if d.receiver == nil { @@ -258,8 +283,15 @@ func (d *KafkaDispatcher) setConfig(config *multichannelfanout.Config) { d.config.Store(config) } -func NewDispatcher(brokers []string, consumerMode cluster.ConsumerMode, logger *zap.Logger) (*KafkaDispatcher, error) { +func (d *KafkaDispatcher) getHostToChannelMap() map[string]provisioners.ChannelReference { + return d.hostToChannelMap.Load().(map[string]provisioners.ChannelReference) +} +func (d *KafkaDispatcher) setHostToChannelMap(hcMap map[string]provisioners.ChannelReference) { + d.hostToChannelMap.Store(hcMap) +} + +func NewDispatcher(brokers []string, consumerMode cluster.ConsumerMode, logger *zap.Logger) (*KafkaDispatcher, error) { conf := sarama.NewConfig() conf.Version = sarama.V1_1_0_0 conf.ClientID = controller.Name + "-dispatcher" @@ -282,16 +314,31 @@ func NewDispatcher(brokers []string, consumerMode cluster.ConsumerMode, logger * logger: logger, } - receiverFunc := provisioners.NewMessageReceiver( + receiverFunc, err := provisioners.NewMessageReceiver( func(channel provisioners.ChannelReference, message *provisioners.Message) error { dispatcher.kafkaAsyncProducer.Input() <- toKafkaMessage(channel, message) return nil - }, logger.Sugar()) + }, + logger.Sugar(), + provisioners.ResolveChannelFromHostHeader(provisioners.ResolveChannelFromHostFunc(dispatcher.getChannelReferenceFromHost))) + if err != nil { + return nil, err + } dispatcher.receiver = receiverFunc dispatcher.setConfig(&multichannelfanout.Config{}) + dispatcher.setHostToChannelMap(map[string]provisioners.ChannelReference{}) return dispatcher, nil } +func (d *KafkaDispatcher) getChannelReferenceFromHost(host string) (provisioners.ChannelReference, error) { + chMap := d.getHostToChannelMap() + cr, ok := chMap[host] + if !ok { + return cr, fmt.Errorf("Invalid HostName:%s. 
HostName not found in ConfigMap for any Channel", host) + } + return cr, nil +} + func fromKafkaMessage(kafkaMessage *sarama.ConsumerMessage) *provisioners.Message { headers := make(map[string]string) for _, header := range kafkaMessage.Headers { diff --git a/contrib/kafka/pkg/dispatcher/dispatcher_test.go b/contrib/kafka/pkg/dispatcher/dispatcher_test.go index 8c333e7c56e..9d0a32222fb 100644 --- a/contrib/kafka/pkg/dispatcher/dispatcher_test.go +++ b/contrib/kafka/pkg/dispatcher/dispatcher_test.go @@ -183,23 +183,29 @@ func (c *mockSaramaCluster) GetConsumerMode() cluster.ConsumerMode { func TestDispatcher_UpdateConfig(t *testing.T) { testCases := []struct { - name string - oldConfig *multichannelfanout.Config - newConfig *multichannelfanout.Config - subscribes []string - unsubscribes []string - createErr string + name string + oldConfig *multichannelfanout.Config + newConfig *multichannelfanout.Config + subscribes []string + unsubscribes []string + createErr string + oldHostToChanMap map[string]provisioners.ChannelReference + newHostToChanMap map[string]provisioners.ChannelReference }{ { - name: "nil config", - oldConfig: &multichannelfanout.Config{}, - newConfig: nil, - createErr: "nil config", + name: "nil config", + oldConfig: &multichannelfanout.Config{}, + newConfig: nil, + createErr: "nil config", + oldHostToChanMap: map[string]provisioners.ChannelReference{}, + newHostToChanMap: map[string]provisioners.ChannelReference{}, }, { - name: "same config", - oldConfig: &multichannelfanout.Config{}, - newConfig: &multichannelfanout.Config{}, + name: "same config", + oldConfig: &multichannelfanout.Config{}, + newConfig: &multichannelfanout.Config{}, + oldHostToChanMap: map[string]provisioners.ChannelReference{}, + newHostToChanMap: map[string]provisioners.ChannelReference{}, }, { name: "config with no subscription", @@ -209,9 +215,14 @@ func TestDispatcher_UpdateConfig(t *testing.T) { { Namespace: "default", Name: "test-channel", + HostName: "a.b.c.d", }, }, }, + 
oldHostToChanMap: map[string]provisioners.ChannelReference{}, + newHostToChanMap: map[string]provisioners.ChannelReference{ + "a.b.c.d": provisioners.ChannelReference{Name: "test-channel", Namespace: "default"}, + }, }, { name: "single channel w/ new subscriptions", @@ -221,6 +232,7 @@ func TestDispatcher_UpdateConfig(t *testing.T) { { Namespace: "default", Name: "test-channel", + HostName: "a.b.c.d", FanoutConfig: fanout.Config{ Subscriptions: []eventingduck.ChannelSubscriberSpec{ { @@ -240,7 +252,11 @@ func TestDispatcher_UpdateConfig(t *testing.T) { }, }, }, - subscribes: []string{"subscription-1", "subscription-2"}, + subscribes: []string{"subscription-1", "subscription-2"}, + oldHostToChanMap: map[string]provisioners.ChannelReference{}, + newHostToChanMap: map[string]provisioners.ChannelReference{ + "a.b.c.d": provisioners.ChannelReference{Name: "test-channel", Namespace: "default"}, + }, }, { name: "single channel w/ existing subscriptions", @@ -249,6 +265,7 @@ func TestDispatcher_UpdateConfig(t *testing.T) { { Namespace: "default", Name: "test-channel", + HostName: "a.b.c.d", FanoutConfig: fanout.Config{ Subscriptions: []eventingduck.ChannelSubscriberSpec{ { @@ -269,6 +286,7 @@ func TestDispatcher_UpdateConfig(t *testing.T) { { Namespace: "default", Name: "test-channel", + HostName: "a.b.c.d", FanoutConfig: fanout.Config{ Subscriptions: []eventingduck.ChannelSubscriberSpec{ { @@ -290,6 +308,12 @@ func TestDispatcher_UpdateConfig(t *testing.T) { }, subscribes: []string{"subscription-2", "subscription-3"}, unsubscribes: []string{"subscription-1"}, + oldHostToChanMap: map[string]provisioners.ChannelReference{ + "a.b.c.d": provisioners.ChannelReference{Name: "test-channel", Namespace: "default"}, + }, + newHostToChanMap: map[string]provisioners.ChannelReference{ + "a.b.c.d": provisioners.ChannelReference{Name: "test-channel", Namespace: "default"}, + }, }, { name: "multi channel w/old and new subscriptions", @@ -298,6 +322,7 @@ func TestDispatcher_UpdateConfig(t 
*testing.T) { { Namespace: "default", Name: "test-channel-1", + HostName: "a.b.c.d", FanoutConfig: fanout.Config{ Subscriptions: []eventingduck.ChannelSubscriberSpec{ { @@ -319,6 +344,7 @@ func TestDispatcher_UpdateConfig(t *testing.T) { { Namespace: "default", Name: "test-channel-1", + HostName: "a.b.c.d", FanoutConfig: fanout.Config{ Subscriptions: []eventingduck.ChannelSubscriberSpec{ { @@ -333,6 +359,7 @@ func TestDispatcher_UpdateConfig(t *testing.T) { { Namespace: "default", Name: "test-channel-2", + HostName: "e.f.g.h", FanoutConfig: fanout.Config{ Subscriptions: []eventingduck.ChannelSubscriberSpec{ { @@ -354,6 +381,33 @@ func TestDispatcher_UpdateConfig(t *testing.T) { }, subscribes: []string{"subscription-1", "subscription-3", "subscription-4"}, unsubscribes: []string{"subscription-2"}, + oldHostToChanMap: map[string]provisioners.ChannelReference{ + "a.b.c.d": provisioners.ChannelReference{Name: "test-channel-1", Namespace: "default"}, + }, + newHostToChanMap: map[string]provisioners.ChannelReference{ + "a.b.c.d": provisioners.ChannelReference{Name: "test-channel-1", Namespace: "default"}, + "e.f.g.h": provisioners.ChannelReference{Name: "test-channel-2", Namespace: "default"}, + }, + }, + { + name: "Duplicate hostnames", + oldConfig: &multichannelfanout.Config{}, + newConfig: &multichannelfanout.Config{ + ChannelConfigs: []multichannelfanout.ChannelConfig{ + { + Namespace: "default", + Name: "test-channel-1", + HostName: "a.b.c.d", + }, + { + Namespace: "default", + Name: "test-channel-2", + HostName: "a.b.c.d", + }, + }, + }, + createErr: "Duplicate hostName found. 
HostName:a.b.c.d, channel:default.test-channel-2, channel:default.test-channel-1", + oldHostToChanMap: map[string]provisioners.ChannelReference{}, }, } @@ -367,10 +421,12 @@ func TestDispatcher_UpdateConfig(t *testing.T) { logger: zap.NewNop(), } d.setConfig(&multichannelfanout.Config{}) + d.setHostToChannelMap(map[string]provisioners.ChannelReference{}) // Initialize using oldConfig err := d.UpdateConfig(tc.oldConfig) if err != nil { + t.Errorf("unexpected error: %v", err) } oldSubscribers := sets.NewString() @@ -382,6 +438,12 @@ func TestDispatcher_UpdateConfig(t *testing.T) { if diff := sets.NewString(tc.unsubscribes...).Difference(oldSubscribers); diff.Len() != 0 { t.Errorf("subscriptions %+v were never subscribed", diff) } + if diff := cmp.Diff(tc.oldConfig, d.getConfig()); diff != "" { + t.Errorf("unexpected config (-want, +got) = %v", diff) + } + if diff := cmp.Diff(tc.oldHostToChanMap, d.getHostToChannelMap()); diff != "" { + t.Errorf("unexpected hostToChannelMap (-want, +got) = %v", diff) + } // Update with new config err = d.UpdateConfig(tc.newConfig) @@ -406,6 +468,12 @@ func TestDispatcher_UpdateConfig(t *testing.T) { if diff := cmp.Diff(tc.subscribes, newSubscribers, sortStrings); diff != "" { t.Errorf("unexpected subscribers (-want, +got) = %v", diff) } + if diff := cmp.Diff(tc.newHostToChanMap, d.getHostToChannelMap()); diff != "" { + t.Errorf("unexpected hostToChannelMap (-want, +got) = %v", diff) + } + if diff := cmp.Diff(tc.newConfig, d.getConfig()); diff != "" { + t.Errorf("unexpected config (-want, +got) = %v", diff) + } }) } @@ -631,9 +699,13 @@ func TestKafkaDispatcher_Start(t *testing.T) { t.Errorf("Expected error want %s, got %s", "message receiver is not set", err) } - d.receiver = provisioners.NewMessageReceiver(func(channel provisioners.ChannelReference, message *provisioners.Message) error { + receiver, err := provisioners.NewMessageReceiver(func(channel provisioners.ChannelReference, message *provisioners.Message) error { return nil }, 
zap.NewNop().Sugar()) + if err != nil { + t.Fatalf("Error creating new message receiver. Error:%s", err) + } + d.receiver = receiver err = d.Start(make(chan struct{})) if err == nil { t.Errorf("Expected error want %s, got %s", "kafkaAsyncProducer is not set", err) diff --git a/contrib/natss/pkg/dispatcher/dispatcher/dispatcher.go b/contrib/natss/pkg/dispatcher/dispatcher/dispatcher.go index fa6675b32cb..e2ca29c959d 100644 --- a/contrib/natss/pkg/dispatcher/dispatcher/dispatcher.go +++ b/contrib/natss/pkg/dispatcher/dispatcher/dispatcher.go @@ -71,7 +71,11 @@ func NewDispatcher(natssURL, clusterID string, logger *zap.Logger) (*Subscriptio clusterID: clusterID, subscriptions: make(map[provisioners.ChannelReference]map[subscriptionReference]*stan.Subscription), } - d.receiver = provisioners.NewMessageReceiver(createReceiverFunction(d, logger.Sugar()), logger.Sugar()) + receiver, err := provisioners.NewMessageReceiver(createReceiverFunction(d, logger.Sugar()), logger.Sugar()) + if err != nil { + return nil, err + } + d.receiver = receiver return d, nil } diff --git a/pkg/channelwatcher/channel_watcher.go b/pkg/channelwatcher/channel_watcher.go index 1b9f7dcbb2a..b9a77670ce8 100644 --- a/pkg/channelwatcher/channel_watcher.go +++ b/pkg/channelwatcher/channel_watcher.go @@ -26,9 +26,9 @@ type reconciler struct { func (r *reconciler) Reconcile(req reconcile.Request) (reconcile.Result, error) { ctx := logging.WithLogger(context.TODO(), r.logger.With(zap.Any("request", req))) - r.logger.Info("New update for channel.") + logging.FromContext(ctx).Info("New update for channel.") if err := r.handler(ctx, r.client, req.NamespacedName); err != nil { - r.logger.Error("WatchHandlerFunc returned error", zap.Error(err)) + logging.FromContext(ctx).Error("WatchHandlerFunc returned error", zap.Error(err)) return reconcile.Result{}, err } return reconcile.Result{}, nil diff --git a/pkg/provisioners/channel_util.go b/pkg/provisioners/channel_util.go index 3ca480a6582..f0705452d44 100644 
--- a/pkg/provisioners/channel_util.go +++ b/pkg/provisioners/channel_util.go @@ -65,7 +65,7 @@ func RemoveFinalizer(o metav1.Object, finalizerName string) { type k8sServiceOption func(*corev1.Service) error -// ExternalService is a functional option for CreateK8sService to create a K8s service of type ExternalName +// ExternalService is a functional option for CreateK8sService to create a K8s service of type ExternalName. func ExternalService(c *eventingv1alpha1.Channel) k8sServiceOption { return func(svc *corev1.Service) error { svc.Spec = corev1.ServiceSpec{ @@ -132,6 +132,7 @@ func createK8sService(ctx context.Context, client runtimeClient.Client, getSvc g !expectedLabelsPresent(current.ObjectMeta.Labels, svc.ObjectMeta.Labels) || // This DeepEqual is necessary to force update dispatcher services when upgrading from 0.5 to 0.6. // Above DeepDerivative will not work because we have removed an optional field (name) from ports + // TODO: Remove this check in 0.7+ !equality.Semantic.DeepEqual(svc.Spec.Ports, current.Spec.Ports) { current.Spec = svc.Spec current.ObjectMeta.Labels = addExpectedLabels(current.ObjectMeta.Labels, svc.ObjectMeta.Labels) diff --git a/pkg/provisioners/inmemory/channel/reconcile.go b/pkg/provisioners/inmemory/channel/reconcile.go index fffbdc34c64..2800027f485 100644 --- a/pkg/provisioners/inmemory/channel/reconcile.go +++ b/pkg/provisioners/inmemory/channel/reconcile.go @@ -95,11 +95,12 @@ func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, err r.recorder.Eventf(c, corev1.EventTypeNormal, channelReconciled, "Channel reconciled: %q", c.Name) } - if updateStatusErr := util.UpdateChannel(ctx, r.client, c); updateStatusErr != nil { + if updateStatusErr := r.client.Status().Update(ctx, c); updateStatusErr != nil { logger.Info("Error updating Channel Status", zap.Error(updateStatusErr)) r.recorder.Eventf(c, corev1.EventTypeWarning, channelUpdateStatusFailed, "Failed to update Channel's status: %v", err) return 
reconcile.Result{}, updateStatusErr } + return reconcile.Result{}, err } @@ -117,18 +118,9 @@ func (r *reconciler) reconcile(ctx context.Context, c *eventingv1alpha1.Channel) c.Status.InitializeConditions() - // We are syncing three things: - // 1. The K8s Service to talk to this Channel. - // 3. The configuration of all Channel subscriptions. - - if c.DeletionTimestamp != nil { - // K8s garbage collection will delete the K8s service for this channel. - // We use a finalizer to ensure the channel config has been synced. - util.RemoveFinalizer(c, finalizerName) - return nil - } - - util.AddFinalizer(c, finalizerName) + // We are syncing the following: + // The K8s Service to talk to this Channel. + // The configuration of all Channel subscriptions. svc, err := util.CreateK8sService(ctx, r.client, c, util.ExternalService(c)) if err != nil { diff --git a/pkg/provisioners/inmemory/controller/main.go b/pkg/provisioners/inmemory/controller/main.go index 99ee64f885e..fbbdd262dff 100644 --- a/pkg/provisioners/inmemory/controller/main.go +++ b/pkg/provisioners/inmemory/controller/main.go @@ -29,9 +29,8 @@ import ( "go.uber.org/zap" "sigs.k8s.io/controller-runtime/pkg/client/config" "sigs.k8s.io/controller-runtime/pkg/manager" - // uncomment this line to debug in GKE from local machine - _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + //_ "k8s.io/client-go/plugin/pkg/client/auth/gcp" ) func main() { diff --git a/pkg/provisioners/message_receiver.go b/pkg/provisioners/message_receiver.go index 3874fded80b..b4fceb1022c 100644 --- a/pkg/provisioners/message_receiver.go +++ b/pkg/provisioners/message_receiver.go @@ -34,16 +34,30 @@ const ( // Message receiver receives messages. 
type MessageReceiver struct { - receiverFunc func(ChannelReference, *Message) error - forwardHeaders sets.String - forwardPrefixes []string + receiverFunc func(ChannelReference, *Message) error + forwardHeaders sets.String + forwardPrefixes []string + logger *zap.SugaredLogger + hostToChannelFunc ResolveChannelFromHostFunc +} + +type receiverOptions func(*MessageReceiver) error - logger *zap.SugaredLogger +type ResolveChannelFromHostFunc func(string) (ChannelReference, error) + +// ResolveChannelFromHostHeader is a receiverOption that enables the consumer of the MessageReceiver +// to pass a map[]ChannelReference. This map will then be used to to get the ChannelReference +// from httpRequest.Host before calling receiverFunc +func ResolveChannelFromHostHeader(hostToChannelFunc ResolveChannelFromHostFunc) receiverOptions { + return func(r *MessageReceiver) error { + r.hostToChannelFunc = hostToChannelFunc + return nil + } } // NewMessageReceiver creates a message receiver passing new messages to the // receiverFunc. -func NewMessageReceiver(receiverFunc func(ChannelReference, *Message) error, logger *zap.SugaredLogger) *MessageReceiver { +func NewMessageReceiver(receiverFunc func(ChannelReference, *Message) error, logger *zap.SugaredLogger, opts ...receiverOptions) (*MessageReceiver, error) { receiver := &MessageReceiver{ receiverFunc: receiverFunc, forwardHeaders: sets.NewString(forwardHeaders...), @@ -51,7 +65,16 @@ func NewMessageReceiver(receiverFunc func(ChannelReference, *Message) error, log logger: logger, } - return receiver + for _, opt := range opts { + if err := opt(receiver); err != nil { + return nil, err + } + } + // Default to old behaviour of host = channelName.channelNamespace + if receiver.hostToChannelFunc == nil { + receiver.hostToChannelFunc = ResolveChannelFromHostFunc(ParseChannel) + } + return receiver, nil } // Start begings to receive messages for the receiver. 
@@ -116,13 +139,13 @@ func (r *MessageReceiver) handler() http.Handler { func (r *MessageReceiver) HandleRequest(res http.ResponseWriter, req *http.Request) { host := req.Host r.logger.Infof("Received request for %s", host) - channel, err := ParseChannel(host) + channel, err := r.hostToChannelFunc(host) if err != nil { r.logger.Info("Could not extract channel", zap.Error(err)) res.WriteHeader(http.StatusInternalServerError) return } - + r.logger.Infof("Request mapped to channel: %s", channel.String()) message, err := r.fromRequest(req) if err != nil { res.WriteHeader(http.StatusInternalServerError) diff --git a/pkg/provisioners/message_receiver_test.go b/pkg/provisioners/message_receiver_test.go index 8cd8ca9bfbe..e4ec33a718a 100644 --- a/pkg/provisioners/message_receiver_test.go +++ b/pkg/provisioners/message_receiver_test.go @@ -126,7 +126,10 @@ func TestMessageReceiver_HandleRequest(t *testing.T) { } f := tc.receiverFunc - r := NewMessageReceiver(f, zap.NewNop().Sugar()) + r, err := NewMessageReceiver(f, zap.NewNop().Sugar()) + if err != nil { + t.Fatalf("Error creating new message receiver. Error:%s", err) + } h := r.handler() body := tc.bodyReader diff --git a/pkg/provisioners/provisioner_util.go b/pkg/provisioners/provisioner_util.go index a65551fcc3f..8ef70596ed2 100644 --- a/pkg/provisioners/provisioner_util.go +++ b/pkg/provisioners/provisioner_util.go @@ -80,7 +80,8 @@ func newDispatcherService(ccp *eventingv1alpha1.ClusterChannelProvisioner, opts { // There is a bug in Istio where named port doesn't work when connecting using an ExternalName service // Refer to https://github.com/istio/istio/issues/13193 for more details. 
- // TODO: Revert this when ISTIO fixes the issue + // TODO: uncomment Name:"http" when ISTIO fixes the issue + // Name: "http", Port: 80, Protocol: corev1.ProtocolTCP, TargetPort: intstr.FromInt(8080), @@ -91,7 +92,7 @@ func newDispatcherService(ccp *eventingv1alpha1.ClusterChannelProvisioner, opts for _, opt := range opts { if err := opt(svc); err != nil { - return svc, err + return nil, err } } return svc, nil diff --git a/pkg/sidecar/fanout/fanout_handler.go b/pkg/sidecar/fanout/fanout_handler.go index 2fd4ae97f9a..bd67bfe32c8 100644 --- a/pkg/sidecar/fanout/fanout_handler.go +++ b/pkg/sidecar/fanout/fanout_handler.go @@ -69,7 +69,7 @@ type forwardMessage struct { } // NewHandler creates a new fanout.Handler. -func NewHandler(logger *zap.Logger, config Config) *Handler { +func NewHandler(logger *zap.Logger, config Config) (*Handler, error) { handler := &Handler{ logger: logger, config: config, @@ -79,9 +79,12 @@ func NewHandler(logger *zap.Logger, config Config) *Handler { } // The receiver function needs to point back at the handler itself, so set it up after // initialization. 
- handler.receiver = provisioners.NewMessageReceiver(createReceiverFunction(handler), logger.Sugar()) - - return handler + receiver, err := provisioners.NewMessageReceiver(createReceiverFunction(handler), logger.Sugar()) + if err != nil { + return nil, err + } + handler.receiver = receiver + return handler, nil } func createReceiverFunction(f *Handler) func(provisioners.ChannelReference, *provisioners.Message) error { diff --git a/pkg/sidecar/fanout/fanout_handler_test.go b/pkg/sidecar/fanout/fanout_handler_test.go index 03b756ca8d9..1163144c8e4 100644 --- a/pkg/sidecar/fanout/fanout_handler_test.go +++ b/pkg/sidecar/fanout/fanout_handler_test.go @@ -225,12 +225,21 @@ func TestFanoutHandler_ServeHTTP(t *testing.T) { subs = append(subs, sub) } - h := NewHandler(zap.NewNop(), Config{Subscriptions: subs}) + h, err := NewHandler(zap.NewNop(), Config{Subscriptions: subs}) + if err != nil { + t.Errorf("NewHandler failed. Error:%s", err) + t.FailNow() + } if tc.asyncHandler { h.config.AsyncHandler = true } if tc.receiverFunc != nil { - h.receiver = provisioners.NewMessageReceiver(tc.receiverFunc, zap.NewNop().Sugar()) + receiver, err := provisioners.NewMessageReceiver(tc.receiverFunc, zap.NewNop().Sugar()) + if err != nil { + t.Errorf("NewMessageReceiver failed. 
Error:%s", err) + t.FailNow() + } + h.receiver = receiver } if tc.timeout != 0 { h.timeout = tc.timeout diff --git a/pkg/sidecar/multichannelfanout/multi_channel_fanout_handler.go b/pkg/sidecar/multichannelfanout/multi_channel_fanout_handler.go index e4a7e1c9193..c14cd53725c 100644 --- a/pkg/sidecar/multichannelfanout/multi_channel_fanout_handler.go +++ b/pkg/sidecar/multichannelfanout/multi_channel_fanout_handler.go @@ -60,7 +60,11 @@ func NewHandler(logger *zap.Logger, conf Config) (*Handler, error) { for _, cc := range conf.ChannelConfigs { key := makeChannelKeyFromConfig(cc) - handler := fanout.NewHandler(logger, cc.FanoutConfig) + handler, err := fanout.NewHandler(logger, cc.FanoutConfig) + if err != nil { + logger.Error("Failed creating new fanout handler.", zap.Error(err)) + return nil, err + } if _, present := handlers[key]; present { logger.Error("Duplicate channel key", zap.String("channelKey", key)) return nil, fmt.Errorf("duplicate channel key: %v", key) From feb5e64feb4e7a837eb7a553a14d91b6d47517d7 Mon Sep 17 00:00:00 2001 From: Akash Verenkar Date: Fri, 12 Apr 2019 12:01:55 -0700 Subject: [PATCH 11/26] Updated code based on PR comments --- cmd/fanoutsidecar/main.go | 7 ++- pkg/channelwatcher/channel_watcher.go | 4 +- pkg/provisioners/channel_util.go | 3 +- .../inmemory/channel/reconcile.go | 29 ++++++------ .../inmemory/channel/reconcile_test.go | 44 +------------------ pkg/provisioners/inmemory/controller/main.go | 3 +- pkg/provisioners/provisioner_util.go | 5 ++- .../v1alpha1/broker/resources/ingress.go | 1 + 8 files changed, 31 insertions(+), 65 deletions(-) diff --git a/cmd/fanoutsidecar/main.go b/cmd/fanoutsidecar/main.go index 6392dd91e57..54fa882d9a6 100644 --- a/cmd/fanoutsidecar/main.go +++ b/cmd/fanoutsidecar/main.go @@ -82,7 +82,7 @@ func main() { } if len(channelProvisioners) < 1 { - logger.Fatal("--channel_provisioners must be specified") + logger.Fatal("--channel_provisioner must be specified") } sh, err := 
swappable.NewEmptyHandler(logger) @@ -132,7 +132,10 @@ func setupChannelWatcher(logger *zap.Logger, configUpdated swappable.UpdateConfi logger.Error("Error creating new maanger.", zap.Error(err)) return nil, err } - v1alpha1.AddToScheme(mgr.GetScheme()) + if err = v1alpha1.AddToScheme(mgr.GetScheme()); err != nil { + logger.Error("Error while adding eventing scheme to manager.", zap.Error(err)) + return nil, err + } channelwatcher.New(mgr, logger, updateChannelConfig(configUpdated)) return mgr, nil diff --git a/pkg/channelwatcher/channel_watcher.go b/pkg/channelwatcher/channel_watcher.go index 1b9f7dcbb2a..b9a77670ce8 100644 --- a/pkg/channelwatcher/channel_watcher.go +++ b/pkg/channelwatcher/channel_watcher.go @@ -26,9 +26,9 @@ type reconciler struct { func (r *reconciler) Reconcile(req reconcile.Request) (reconcile.Result, error) { ctx := logging.WithLogger(context.TODO(), r.logger.With(zap.Any("request", req))) - r.logger.Info("New update for channel.") + logging.FromContext(ctx).Info("New update for channel.") if err := r.handler(ctx, r.client, req.NamespacedName); err != nil { - r.logger.Error("WatchHandlerFunc returned error", zap.Error(err)) + logging.FromContext(ctx).Error("WatchHandlerFunc returned error", zap.Error(err)) return reconcile.Result{}, err } return reconcile.Result{}, nil diff --git a/pkg/provisioners/channel_util.go b/pkg/provisioners/channel_util.go index 3ca480a6582..f0705452d44 100644 --- a/pkg/provisioners/channel_util.go +++ b/pkg/provisioners/channel_util.go @@ -65,7 +65,7 @@ func RemoveFinalizer(o metav1.Object, finalizerName string) { type k8sServiceOption func(*corev1.Service) error -// ExternalService is a functional option for CreateK8sService to create a K8s service of type ExternalName +// ExternalService is a functional option for CreateK8sService to create a K8s service of type ExternalName. 
func ExternalService(c *eventingv1alpha1.Channel) k8sServiceOption { return func(svc *corev1.Service) error { svc.Spec = corev1.ServiceSpec{ @@ -132,6 +132,7 @@ func createK8sService(ctx context.Context, client runtimeClient.Client, getSvc g !expectedLabelsPresent(current.ObjectMeta.Labels, svc.ObjectMeta.Labels) || // This DeepEqual is necessary to force update dispatcher services when upgrading from 0.5 to 0.6. // Above DeepDerivative will not work because we have removed an optional field (name) from ports + // TODO: Remove this check in 0.7+ !equality.Semantic.DeepEqual(svc.Spec.Ports, current.Spec.Ports) { current.Spec = svc.Spec current.ObjectMeta.Labels = addExpectedLabels(current.ObjectMeta.Labels, svc.ObjectMeta.Labels) diff --git a/pkg/provisioners/inmemory/channel/reconcile.go b/pkg/provisioners/inmemory/channel/reconcile.go index fffbdc34c64..630f22b4d6b 100644 --- a/pkg/provisioners/inmemory/channel/reconcile.go +++ b/pkg/provisioners/inmemory/channel/reconcile.go @@ -85,6 +85,19 @@ func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, err } logger.Info("Reconciling Channel") + // Finalizer needs to be removed (even though no finalizers are added) main back compat + // with v0.5 in which a finalzier was added. Or else channels will not get deleted after upgrading to 0.6 + // TODO: Remove this entire if block in v0.7+ + if c.DeletionTimestamp != nil { + // K8s garbage collection will delete the K8s service and VirtualService for this channel. + // We use a finalizer to ensure the channel config has been synced. 
+ util.RemoveFinalizer(c, finalizerName) + r.client.Update(ctx, c) + logger.Info("Channel reconciled") + r.recorder.Eventf(c, corev1.EventTypeNormal, channelReconciled, "Channel reconciled: %q", c.Name) + return reconcile.Result{}, nil + } + err = r.reconcile(ctx, c) if err != nil { logger.Info("Error reconciling Channel", zap.Error(err)) @@ -95,7 +108,7 @@ func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, err r.recorder.Eventf(c, corev1.EventTypeNormal, channelReconciled, "Channel reconciled: %q", c.Name) } - if updateStatusErr := util.UpdateChannel(ctx, r.client, c); updateStatusErr != nil { + if updateStatusErr := r.client.Status().Update(ctx, c); updateStatusErr != nil { logger.Info("Error updating Channel Status", zap.Error(updateStatusErr)) r.recorder.Eventf(c, corev1.EventTypeWarning, channelUpdateStatusFailed, "Failed to update Channel's status: %v", err) return reconcile.Result{}, updateStatusErr @@ -117,19 +130,7 @@ func (r *reconciler) reconcile(ctx context.Context, c *eventingv1alpha1.Channel) c.Status.InitializeConditions() - // We are syncing three things: - // 1. The K8s Service to talk to this Channel. - // 3. The configuration of all Channel subscriptions. - - if c.DeletionTimestamp != nil { - // K8s garbage collection will delete the K8s service for this channel. - // We use a finalizer to ensure the channel config has been synced. - util.RemoveFinalizer(c, finalizerName) - return nil - } - - util.AddFinalizer(c, finalizerName) - + // We are syncing K8s Service to talk to this Channel. 
svc, err := util.CreateK8sService(ctx, r.client, c, util.ExternalService(c)) if err != nil { logger.Info("Error creating the Channel's K8s Service", zap.Error(err)) diff --git a/pkg/provisioners/inmemory/channel/reconcile_test.go b/pkg/provisioners/inmemory/channel/reconcile_test.go index 76aa9e06f95..2978419df42 100644 --- a/pkg/provisioners/inmemory/channel/reconcile_test.go +++ b/pkg/provisioners/inmemory/channel/reconcile_test.go @@ -265,7 +265,7 @@ func TestReconcile(t *testing.T) { MockLists: errorListingK8sService(), }, WantPresent: []runtime.Object{ - makeChannelWithFinalizer(), + makeChannel(), }, WantErrMsg: testErrorMessage, WantEvent: []corev1.Event{ @@ -282,41 +282,14 @@ func TestReconcile(t *testing.T) { }, WantPresent: []runtime.Object{ // TODO: This should have a useful error message saying that the K8s Service failed. - makeChannelWithFinalizer(), - }, - WantErrMsg: testErrorMessage, - WantEvent: []corev1.Event{ - events[k8sServiceCreateFailed], - }, - }, - { - Name: "Channel get for update fails", - InitialState: []runtime.Object{ makeChannel(), - makeK8sService(), - }, - Mocks: controllertesting.Mocks{ - MockGets: errorOnSecondChannelGet(), }, WantErrMsg: testErrorMessage, WantEvent: []corev1.Event{ - events[channelReconciled], events[channelUpdateStatusFailed], + events[k8sServiceCreateFailed], }, }, { - Name: "Channel update fails", - InitialState: []runtime.Object{ - makeChannel(), - makeK8sService(), - }, - Mocks: controllertesting.Mocks{ - MockUpdates: errorUpdatingChannel(), - }, - WantErrMsg: testErrorMessage, - WantEvent: []corev1.Event{ - events[channelReconciled], events[channelUpdateStatusFailed], - }, - }, { Name: "Channel status update fails", InitialState: []runtime.Object{ makeChannel(), @@ -405,19 +378,6 @@ func getProvisionerName(pn []string) string { return provisionerName } -func makeChannelWithFinalizerAndAddress() *eventingv1alpha1.Channel { - c := makeChannelWithFinalizer() - c.Status.SetAddress(serviceAddress) - return c -} 
- -func makeReadyChannel() *eventingv1alpha1.Channel { - // Ready channels have the finalizer and are Addressable. - c := makeChannelWithFinalizerAndAddress() - c.Status.MarkProvisioned() - return c -} - func makeChannelNilProvisioner() *eventingv1alpha1.Channel { c := makeChannel() c.Spec.Provisioner = nil diff --git a/pkg/provisioners/inmemory/controller/main.go b/pkg/provisioners/inmemory/controller/main.go index 99ee64f885e..fbbdd262dff 100644 --- a/pkg/provisioners/inmemory/controller/main.go +++ b/pkg/provisioners/inmemory/controller/main.go @@ -29,9 +29,8 @@ import ( "go.uber.org/zap" "sigs.k8s.io/controller-runtime/pkg/client/config" "sigs.k8s.io/controller-runtime/pkg/manager" - // uncomment this line to debug in GKE from local machine - _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + //_ "k8s.io/client-go/plugin/pkg/client/auth/gcp" ) func main() { diff --git a/pkg/provisioners/provisioner_util.go b/pkg/provisioners/provisioner_util.go index a65551fcc3f..0a3653df75f 100644 --- a/pkg/provisioners/provisioner_util.go +++ b/pkg/provisioners/provisioner_util.go @@ -80,7 +80,8 @@ func newDispatcherService(ccp *eventingv1alpha1.ClusterChannelProvisioner, opts { // There is a bug in Istio where named port doesn't work when connecting using an ExternalName service // Refer to https://github.com/istio/istio/issues/13193 for more details. 
- // TODO: Revert this when ISTIO fixes the issue + // TODO: Uncomment Name:"http" when ISTIO fixes the issue + // Name: "http", Port: 80, Protocol: corev1.ProtocolTCP, TargetPort: intstr.FromInt(8080), @@ -91,7 +92,7 @@ func newDispatcherService(ccp *eventingv1alpha1.ClusterChannelProvisioner, opts for _, opt := range opts { if err := opt(svc); err != nil { - return svc, err + return nil, err } } return svc, nil diff --git a/pkg/reconciler/v1alpha1/broker/resources/ingress.go b/pkg/reconciler/v1alpha1/broker/resources/ingress.go index 8df5a57f841..3bde11755e9 100644 --- a/pkg/reconciler/v1alpha1/broker/resources/ingress.go +++ b/pkg/reconciler/v1alpha1/broker/resources/ingress.go @@ -59,6 +59,7 @@ func MakeIngress(args *IngressArgs) *appsv1.Deployment { ObjectMeta: metav1.ObjectMeta{ Labels: ingressLabels(args.Broker), // TODO: Remove this annotation once all channels stop using istio virtual service + // https://github.com/knative/eventing/issues/294 Annotations: map[string]string{ "sidecar.istio.io/inject": "true", }, From c1b85816e8c8f2572a760bed102a4388110e7c63 Mon Sep 17 00:00:00 2001 From: Akash Verenkar Date: Mon, 15 Apr 2019 13:13:15 -0700 Subject: [PATCH 12/26] WIP --- cmd/fanoutsidecar/main.go | 5 - contrib/kafka/cmd/dispatcher/main.go | 33 +--- pkg/channelwatcher/channel_watcher.go | 44 ++++- pkg/channelwatcher/channel_watcher_test.go | 179 ++++++++++++++++++ pkg/reconciler/testing/table.go | 6 +- pkg/sidecar/multichannelfanout/config.go | 10 - pkg/sidecar/multichannelfanout/config_test.go | 141 ++++++++++++++ 7 files changed, 366 insertions(+), 52 deletions(-) create mode 100644 pkg/channelwatcher/channel_watcher_test.go create mode 100644 pkg/sidecar/multichannelfanout/config_test.go diff --git a/cmd/fanoutsidecar/main.go b/cmd/fanoutsidecar/main.go index 618105b53e0..d560806e8cf 100644 --- a/cmd/fanoutsidecar/main.go +++ b/cmd/fanoutsidecar/main.go @@ -131,14 +131,9 @@ func setupChannelWatcher(logger *zap.Logger, configUpdated swappable.UpdateConfi 
logger.Error("Error creating new maanger.", zap.Error(err)) return nil, err } -<<<<<<< HEAD - if err := v1alpha1.AddToScheme(mgr.GetScheme()); err != nil { - logger.Error("Error adding eventinging scheme to manager.", zap.Error(err)) -======= if err = v1alpha1.AddToScheme(mgr.GetScheme()); err != nil { logger.Error("Error while adding eventing scheme to manager.", zap.Error(err)) return nil, err ->>>>>>> feb5e64feb4e7a837eb7a553a14d91b6d47517d7 } channelwatcher.New(mgr, logger, updateChannelConfig(configUpdated)) diff --git a/contrib/kafka/cmd/dispatcher/main.go b/contrib/kafka/cmd/dispatcher/main.go index 9fe7cfabd52..7f15663181c 100644 --- a/contrib/kafka/cmd/dispatcher/main.go +++ b/contrib/kafka/cmd/dispatcher/main.go @@ -17,7 +17,6 @@ limitations under the License. package main import ( - "context" "flag" "log" @@ -26,13 +25,8 @@ import ( "github.com/knative/eventing/contrib/kafka/pkg/dispatcher" "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" "github.com/knative/eventing/pkg/channelwatcher" - "github.com/knative/eventing/pkg/logging" - "github.com/knative/eventing/pkg/sidecar/multichannelfanout" - "github.com/knative/eventing/pkg/sidecar/swappable" "github.com/knative/pkg/signals" "go.uber.org/zap" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/config" "sigs.k8s.io/controller-runtime/pkg/manager" ) @@ -62,7 +56,7 @@ func main() { } v1alpha1.AddToScheme(mgr.GetScheme()) - channelwatcher.New(mgr, logger, updateChannelConfig(kafkaDispatcher.UpdateConfig)) + channelwatcher.New(mgr, logger, channelwatcher.UpdateChannelConfigWatchHandler(kafkaDispatcher.UpdateConfig, shouldWatch)) if err != nil { logger.Fatal("Unable to create channel watcher.", zap.Error(err)) } @@ -75,31 +69,6 @@ func main() { } logger.Info("Exiting...") } -func updateChannelConfig(updateConfig swappable.UpdateConfig) channelwatcher.WatchHandlerFunc { - return func(ctx context.Context, c client.Client, 
chanNamespacedName types.NamespacedName) error { - channels, err := listAllChannels(ctx, c) - if err != nil { - logging.FromContext(ctx).Info("Unable to list channels", zap.Error(err)) - return err - } - config := multichannelfanout.NewConfigFromChannels(channels) - return updateConfig(config) - } -} - -func listAllChannels(ctx context.Context, c client.Client) ([]v1alpha1.Channel, error) { - channels := make([]v1alpha1.Channel, 0) - cl := &v1alpha1.ChannelList{} - if err := c.List(ctx, &client.ListOptions{}, cl); err != nil { - return nil, err - } - for _, c := range cl.Items { - if c.Status.IsReady() && shouldWatch(&c) { - channels = append(channels, c) - } - } - return channels, nil -} func shouldWatch(ch *v1alpha1.Channel) bool { return ch.Spec.Provisioner != nil && diff --git a/pkg/channelwatcher/channel_watcher.go b/pkg/channelwatcher/channel_watcher.go index b9a77670ce8..8c687d01690 100644 --- a/pkg/channelwatcher/channel_watcher.go +++ b/pkg/channelwatcher/channel_watcher.go @@ -9,6 +9,8 @@ import ( "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" "github.com/knative/eventing/pkg/logging" + "github.com/knative/eventing/pkg/sidecar/multichannelfanout" + "github.com/knative/eventing/pkg/sidecar/swappable" "go.uber.org/zap" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -16,8 +18,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/source" ) -type WatchHandlerFunc func(context.Context, client.Client, types.NamespacedName) error - type reconciler struct { client client.Client logger *zap.Logger @@ -34,6 +34,7 @@ func (r *reconciler) Reconcile(req reconcile.Request) (reconcile.Result, error) return reconcile.Result{}, nil } +// New creates a new instance of Channel Watcher that watches channels and calls the watchHandler on add, update, delete and generic event func New(mgr manager.Manager, logger *zap.Logger, watchHandler WatchHandlerFunc) error { c, err := controller.New("ChannelWatcher", mgr, 
controller.Options{ Reconciler: &reconciler{ @@ -57,3 +58,42 @@ func New(mgr manager.Manager, logger *zap.Logger, watchHandler WatchHandlerFunc) } return nil } + +// WatchHandlerFunc is called whenever an add, update, delete or generic event is triggers on a channel watch +type WatchHandlerFunc func(context.Context, client.Client, types.NamespacedName) error + +// ShouldWatchFunc is called while returning list of channels. +// Channels are included in the list if the return value is true. +type ShouldWatchFunc func(ch *v1alpha1.Channel) bool + +// UpdateChannelConfigWatchHandler is a special handler that +// 1. Lists the channels for which shouldWatch returns true +// 2. Creates a multi-channel-fanout-config +// 3. Calls the updateConfig func with the new multi-channel-fanout-config +// This is used by dispatchers or receivers to update their configs by watching channels +func UpdateChannelConfigWatchHandler(updateConfig swappable.UpdateConfig, shouldWatch ShouldWatchFunc) WatchHandlerFunc { + return func(ctx context.Context, c client.Client, chanNamespacedName types.NamespacedName) error { + channels, err := listAllChannels(ctx, c, shouldWatch) + if err != nil { + logging.FromContext(ctx).Info("Unable to list channels", zap.Error(err)) + return err + } + config := multichannelfanout.NewConfigFromChannels(channels) + return updateConfig(config) + } +} + +// ListAllChannels queries client and gets list of all channels for which shouldWatch returns true. 
+func listAllChannels(ctx context.Context, c client.Client, shouldWatch ShouldWatchFunc) ([]v1alpha1.Channel, error) { + channels := make([]v1alpha1.Channel, 0) + cl := &v1alpha1.ChannelList{} + if err := c.List(ctx, &client.ListOptions{}, cl); err != nil { + return nil, err + } + for _, c := range cl.Items { + if c.Status.IsReady() && shouldWatch(&c) { + channels = append(channels, c) + } + } + return channels, nil +} diff --git a/pkg/channelwatcher/channel_watcher_test.go b/pkg/channelwatcher/channel_watcher_test.go new file mode 100644 index 00000000000..b3d5395b7ba --- /dev/null +++ b/pkg/channelwatcher/channel_watcher_test.go @@ -0,0 +1,179 @@ +package channelwatcher + +import ( + "context" + "fmt" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" + controllertesting "github.com/knative/eventing/pkg/reconciler/testing" + "github.com/knative/eventing/pkg/sidecar/fanout" + "github.com/knative/eventing/pkg/sidecar/multichannelfanout" + "github.com/knative/eventing/pkg/sidecar/swappable" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes/scheme" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + eventingduck "github.com/knative/eventing/pkg/apis/duck/v1alpha1" + corev1 "k8s.io/api/core/v1" +) + +func init() { + // Add types to scheme + _ = v1alpha1.AddToScheme(scheme.Scheme) +} + +func TestUpdateChannelConfigWatchHandler(t *testing.T) { + tests := []struct { + name string + channels []runtime.Object + clientListError error + updateConfigError error + expectedConfig *multichannelfanout.Config + }{ + { + name: "Client list error", + clientListError: fmt.Errorf("Client list error"), + }, + { + name: "update config error", + updateConfigError: fmt.Errorf("error updating config"), + expectedConfig: &multichannelfanout.Config{ + ChannelConfigs: []multichannelfanout.ChannelConfig{}, + }, + }, + { + name: "Successfully 
update config", + channels: []runtime.Object{ + makechannel("chan-1", "ns-1", "e.f.g.h", makeSubscribable(makeSubscriber("sub1"), makeSubscriber("sub2"))), + makechannel("chan-2", "ns-2", "i.j.k.l", makeSubscribable(makeSubscriber("sub3"), makeSubscriber("sub4"))), + makechannel("chan-3", "donotwatch", "i.j.k.l", makeSubscribable(makeSubscriber("sub3"), makeSubscriber("sub4"))), + }, + expectedConfig: &multichannelfanout.Config{ + ChannelConfigs: []multichannelfanout.ChannelConfig{ + { + Name: "chan-1", + Namespace: "ns-1", + HostName: "e.f.g.h", + FanoutConfig: fanout.Config{ + Subscriptions: []eventingduck.ChannelSubscriberSpec{ + makeSubscriber("sub1"), + makeSubscriber("sub2"), + }, + }, + }, { + Name: "chan-2", + Namespace: "ns-2", + HostName: "i.j.k.l", + FanoutConfig: fanout.Config{ + Subscriptions: []eventingduck.ChannelSubscriberSpec{ + makeSubscriber("sub3"), + makeSubscriber("sub4"), + }, + }, + }, + }, + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + actualConfig := ConfigHolder{} + watchHandler := UpdateChannelConfigWatchHandler(updateConfigWrapper(&actualConfig, test.updateConfigError), shouldWatch) + mockClient := getClient(test.channels, getClientMocks(test.clientListError)) + + actualError := watchHandler(context.TODO(), mockClient, types.NamespacedName{}) + if actualError != nil { + if test.clientListError != nil { + if diff := cmp.Diff(test.clientListError.Error(), actualError.Error()); diff != "" { + t.Fatalf("Unexpected difference (-want +got): %v", diff) + } + } + if test.updateConfigError != nil { + if diff := cmp.Diff(test.updateConfigError.Error(), actualError.Error()); diff != "" { + t.Fatalf("Unexpected difference (-want +got): %v", diff) + } + } + } else { + if test.clientListError != nil { + t.Fatalf("Want error %v \n Got nil", test.clientListError) + } + if test.updateConfigError != nil { + t.Fatalf("Want error %v \n Got nil", test.updateConfigError) + } + } + if diff := 
cmp.Diff(test.expectedConfig, actualConfig.config); diff != "" { + t.Fatalf("Unexpected difference (-want +got): %v", diff) + } + }) + } +} + +type ConfigHolder struct { + config *multichannelfanout.Config +} + +func shouldWatch(c *v1alpha1.Channel) bool { + if c.Namespace == "donotwatch" { + return false + } + return true +} +func updateConfigWrapper(ch *ConfigHolder, returnError error) swappable.UpdateConfig { + return func(c *multichannelfanout.Config) error { + ch.config = c + return returnError + } +} + +func getClient(objs []runtime.Object, mocks controllertesting.Mocks) *controllertesting.MockClient { + innerClient := fake.NewFakeClient(objs...) + return controllertesting.NewMockClient(innerClient, mocks) +} + +func getClientMocks(listError error) controllertesting.Mocks { + if listError != nil { + return controllertesting.Mocks{ + MockLists: []controllertesting.MockList{ + func(_ client.Client, _ context.Context, _ *client.ListOptions, _ runtime.Object) (controllertesting.MockHandled, error) { + return controllertesting.Handled, listError + }, + }, + } + } + return controllertesting.Mocks{} +} + +func makechannel(name string, namespace string, hostname string, subscribable *eventingduck.Subscribable) *v1alpha1.Channel { + c := v1alpha1.Channel{ + Spec: v1alpha1.ChannelSpec{ + Subscribable: subscribable, + }, + } + c.Name = name + c.Namespace = namespace + c.Status.InitializeConditions() + c.Status.MarkProvisioned() + c.Status.MarkProvisionerInstalled() + c.Status.SetAddress(hostname) + return &c +} +func makeSubscribable(subsriberSpec ...eventingduck.ChannelSubscriberSpec) *eventingduck.Subscribable { + return &eventingduck.Subscribable{ + Subscribers: subsriberSpec, + } +} + +func makeSubscriber(name string) eventingduck.ChannelSubscriberSpec { + return eventingduck.ChannelSubscriberSpec{ + Ref: &corev1.ObjectReference{ + Name: name, + Namespace: name + "-ns", + }, + SubscriberURI: name + "-suburi", + ReplyURI: name + "-replyuri", + } +} diff --git 
a/pkg/reconciler/testing/table.go b/pkg/reconciler/testing/table.go index 9bf2e2334e6..d26d57a4cf1 100644 --- a/pkg/reconciler/testing/table.go +++ b/pkg/reconciler/testing/table.go @@ -149,7 +149,7 @@ func (tc *TestCase) GetDynamicClient() dynamic.Interface { // GetClient returns the mockClient to use for this test case. func (tc *TestCase) GetClient() *MockClient { - builtObjects := buildAllObjects(tc.InitialState) + builtObjects := BuildAllObjects(tc.InitialState) innerClient := fake.NewFakeClient(builtObjects...) return NewMockClient(innerClient, tc.Mocks) } @@ -224,7 +224,7 @@ func (se stateErrors) Error() string { // to be present after reconciliation. func (tc *TestCase) VerifyWantPresent(c client.Client) error { var errs stateErrors - builtObjects := buildAllObjects(tc.WantPresent) + builtObjects := BuildAllObjects(tc.WantPresent) for _, wp := range builtObjects { o, err := scheme.Scheme.New(wp.GetObjectKind().GroupVersionKind()) if err != nil { @@ -304,7 +304,7 @@ func getEventsAsString(events []corev1.Event) []string { return eventsAsString } -func buildAllObjects(objs []runtime.Object) []runtime.Object { +func BuildAllObjects(objs []runtime.Object) []runtime.Object { builtObjs := []runtime.Object{} for _, obj := range objs { if builder, ok := obj.(Buildable); ok { diff --git a/pkg/sidecar/multichannelfanout/config.go b/pkg/sidecar/multichannelfanout/config.go index 84f29b85089..1c3ca420def 100644 --- a/pkg/sidecar/multichannelfanout/config.go +++ b/pkg/sidecar/multichannelfanout/config.go @@ -14,16 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package multichannelfanout provides an http.Handler that takes in one request to a Knative -// Channel and fans it out to N other requests. Logically, it represents multiple Knative Channels. -// It is made up of a map, map[channel]fanout.Handler and each incoming request is inspected to -// determine which Channel it is on. 
This Handler delegates the HTTP handling to the fanout.Handler -// corresponding to the incoming request's Channel. -// It is often used in conjunction with a swappable.Handler. The swappable.Handler delegates all its -// requests to the multichannelfanout.Handler. When a new configuration is available, a new -// multichannelfanout.Handler is created and swapped in for all subsequent requests. The old -// multichannelfanout.Handler is discarded. - package multichannelfanout import ( diff --git a/pkg/sidecar/multichannelfanout/config_test.go b/pkg/sidecar/multichannelfanout/config_test.go new file mode 100644 index 00000000000..a6d3d5ed782 --- /dev/null +++ b/pkg/sidecar/multichannelfanout/config_test.go @@ -0,0 +1,141 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package multichannelfanout provides an http.Handler that takes in one request to a Knative +// Channel and fans it out to N other requests. Logically, it represents multiple Knative Channels. +// It is made up of a map, map[channel]fanout.Handler and each incoming request is inspected to +// determine which Channel it is on. This Handler delegates the HTTP handling to the fanout.Handler +// corresponding to the incoming request's Channel. +// It is often used in conjunction with a swappable.Handler. The swappable.Handler delegates all its +// requests to the multichannelfanout.Handler. 
When a new configuration is available, a new +// multichannelfanout.Handler is created and swapped in for all subsequent requests. The old +// multichannelfanout.Handler is discarded. + +package multichannelfanout + +import ( + "testing" + + "github.com/knative/eventing/pkg/sidecar/fanout" + + "github.com/google/go-cmp/cmp" + eventingduck "github.com/knative/eventing/pkg/apis/duck/v1alpha1" + "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" + duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1" + corev1 "k8s.io/api/core/v1" +) + +func TestNewConfigFromChannels(t *testing.T) { + tests := []struct { + name string + channels []v1alpha1.Channel + expected *Config + }{ + { + name: "empty channels list", + channels: []v1alpha1.Channel{}, + expected: &Config{ + ChannelConfigs: []ChannelConfig{}, + }, + }, { + name: "one channel with no subscribers", + channels: []v1alpha1.Channel{ + makechannel("chan-1", "ns-1", "a.b.c.d", nil), + }, + expected: &Config{ + ChannelConfigs: []ChannelConfig{ + { + Name: "chan-1", + Namespace: "ns-1", + HostName: "a.b.c.d", + }, + }, + }, + }, { + name: "multiple channels with subscribers", + channels: []v1alpha1.Channel{ + makechannel("chan-1", "ns-1", "e.f.g.h", makeSubscribable(makeSubscriber("sub1"), makeSubscriber("sub2"))), + makechannel("chan-2", "ns-2", "i.j.k.l", makeSubscribable(makeSubscriber("sub3"), makeSubscriber("sub4"))), + }, + expected: &Config{ + ChannelConfigs: []ChannelConfig{ + { + Name: "chan-1", + Namespace: "ns-1", + HostName: "e.f.g.h", + FanoutConfig: fanout.Config{ + Subscriptions: []eventingduck.ChannelSubscriberSpec{ + makeSubscriber("sub1"), + makeSubscriber("sub2"), + }, + }, + }, { + Name: "chan-2", + Namespace: "ns-2", + HostName: "i.j.k.l", + FanoutConfig: fanout.Config{ + Subscriptions: []eventingduck.ChannelSubscriberSpec{ + makeSubscriber("sub3"), + makeSubscriber("sub4"), + }, + }, + }, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + actual := 
NewConfigFromChannels(test.channels) + if diff := cmp.Diff(test.expected, actual); diff != "" { + t.Fatalf("Unexpected difference (-want +got): %v", diff) + } + }) + } +} + +func makechannel(name string, namespace string, hostname string, subscribable *eventingduck.Subscribable) v1alpha1.Channel { + c := v1alpha1.Channel{ + Spec: v1alpha1.ChannelSpec{ + Subscribable: subscribable, + }, + Status: v1alpha1.ChannelStatus{ + Address: duckv1alpha1.Addressable{ + Hostname: hostname, + }, + }, + } + c.Name = name + c.Namespace = namespace + return c +} +func makeSubscribable(subsriberSpec ...eventingduck.ChannelSubscriberSpec) *eventingduck.Subscribable { + return &eventingduck.Subscribable{ + Subscribers: subsriberSpec, + } +} + +func makeSubscriber(name string) eventingduck.ChannelSubscriberSpec { + return eventingduck.ChannelSubscriberSpec{ + Ref: &corev1.ObjectReference{ + Name: name, + Namespace: name + "-ns", + }, + SubscriberURI: name + "-suburi", + ReplyURI: name + "-replyuri", + } +} From d2c831f7985e315a981ac41f31c48c5aa8717c3f Mon Sep 17 00:00:00 2001 From: Akash Verenkar Date: Mon, 15 Apr 2019 14:11:12 -0700 Subject: [PATCH 13/26] Updates based on PR comments --- cmd/broker/ingress/main.go | 3 +- cmd/controller/main.go | 16 +++++----- cmd/fanoutsidecar/main.go | 16 ++++++++-- contrib/kafka/cmd/controller/main.go | 12 ++++---- contrib/kafka/main.go | 12 ++++---- pkg/provisioners/channel_util.go | 29 +++++++++++++------ pkg/provisioners/channel_util_test.go | 29 +++++++++++++++++++ .../inmemory/channel/reconcile.go | 16 ++++------ .../inmemory/channel/reconcile_test.go | 21 ++++---------- pkg/provisioners/inmemory/controller/main.go | 2 +- 10 files changed, 96 insertions(+), 60 deletions(-) diff --git a/cmd/broker/ingress/main.go b/cmd/broker/ingress/main.go index ea0094fba73..4e1ec764f55 100644 --- a/cmd/broker/ingress/main.go +++ b/cmd/broker/ingress/main.go @@ -42,10 +42,11 @@ import ( "go.opencensus.io/stats/view" "go.opencensus.io/tag" "go.uber.org/zap" - _ 
"k8s.io/client-go/plugin/pkg/client/auth/gcp" "sigs.k8s.io/controller-runtime/pkg/client/config" "sigs.k8s.io/controller-runtime/pkg/manager" crlog "sigs.k8s.io/controller-runtime/pkg/runtime/log" + // Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters). + //_ "k8s.io/client-go/plugin/pkg/client/auth/gcp" ) var ( diff --git a/cmd/controller/main.go b/cmd/controller/main.go index a508d7092a9..36d1653a0da 100644 --- a/cmd/controller/main.go +++ b/cmd/controller/main.go @@ -24,20 +24,13 @@ import ( "os" "time" + eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" + "github.com/knative/eventing/pkg/logconfig" "github.com/knative/eventing/pkg/reconciler/v1alpha1/broker" "github.com/knative/eventing/pkg/reconciler/v1alpha1/channel" "github.com/knative/eventing/pkg/reconciler/v1alpha1/namespace" "github.com/knative/eventing/pkg/reconciler/v1alpha1/subscription" "github.com/knative/eventing/pkg/reconciler/v1alpha1/trigger" - "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/manager" - - // Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters). 
- _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" - - eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" - "github.com/knative/eventing/pkg/logconfig" istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3" "github.com/knative/pkg/configmap" "github.com/knative/pkg/logging" @@ -46,9 +39,14 @@ import ( "github.com/knative/pkg/system" "github.com/prometheus/client_golang/prometheus/promhttp" "go.uber.org/zap" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/kubernetes" controllerruntime "sigs.k8s.io/controller-runtime/pkg/client/config" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/manager" logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" + // Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters). + //_ "k8s.io/client-go/plugin/pkg/client/auth/gcp" ) const ( diff --git a/cmd/fanoutsidecar/main.go b/cmd/fanoutsidecar/main.go index 54fa882d9a6..5e2fd40c6c6 100644 --- a/cmd/fanoutsidecar/main.go +++ b/cmd/fanoutsidecar/main.go @@ -40,9 +40,10 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/config" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/runtime/signals" - // uncomment this line to debug in GKE from local machine + // Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters). //_ "k8s.io/client-go/plugin/pkg/client/auth/gcp" ) +) var ( readTimeout = 1 * time.Minute @@ -156,7 +157,12 @@ func updateChannelConfig(updateConfig swappable.UpdateConfig) channelwatcher.Wat func listAllChannels(ctx context.Context, c client.Client) ([]v1alpha1.Channel, error) { channels := make([]v1alpha1.Channel, 0) cl := &v1alpha1.ChannelList{} - if err := c.List(ctx, &client.ListOptions{}, cl); err != nil { + opts := &client.ListOptions{ + // Set Raw because if we need to get more than one page, then we will put the continue token + // into opts.Raw.Continue. 
+ Raw: &metav1.ListOptions{}, + } + if err := c.List(ctx, opts, cl); err != nil { return nil, err } for _, c := range cl.Items { @@ -164,7 +170,11 @@ func listAllChannels(ctx context.Context, c client.Client) ([]v1alpha1.Channel, channels = append(channels, c) } } - return channels, nil + if cl.Continue != "" { + opts.Raw.Continue = cl.Continue + } else { + return channels, nil + } } func shouldWatch(ch *v1alpha1.Channel) bool { diff --git a/contrib/kafka/cmd/controller/main.go b/contrib/kafka/cmd/controller/main.go index 37e45a43349..5f57b0165de 100644 --- a/contrib/kafka/cmd/controller/main.go +++ b/contrib/kafka/cmd/controller/main.go @@ -4,20 +4,20 @@ import ( "flag" "os" + provisionerController "github.com/knative/eventing/contrib/kafka/pkg/controller" + "github.com/knative/eventing/contrib/kafka/pkg/controller/channel" + eventingv1alpha "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" + "github.com/knative/eventing/pkg/provisioners" istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3" "go.uber.org/zap" "k8s.io/apimachinery/pkg/runtime" - _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" "sigs.k8s.io/controller-runtime/pkg/client/config" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/manager" logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" "sigs.k8s.io/controller-runtime/pkg/runtime/signals" - - provisionerController "github.com/knative/eventing/contrib/kafka/pkg/controller" - "github.com/knative/eventing/contrib/kafka/pkg/controller/channel" - eventingv1alpha "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" - "github.com/knative/eventing/pkg/provisioners" + // Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters). + //_ "k8s.io/client-go/plugin/pkg/client/auth/gcp" ) // SchemeFunc adds types to a Scheme. 
diff --git a/contrib/kafka/main.go b/contrib/kafka/main.go index 316f2dbd521..ec0a9067282 100644 --- a/contrib/kafka/main.go +++ b/contrib/kafka/main.go @@ -4,20 +4,20 @@ import ( "flag" "os" + provisionerController "github.com/knative/eventing/contrib/kafka/pkg/controller" + "github.com/knative/eventing/contrib/kafka/pkg/controller/channel" + eventingv1alpha "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" + "github.com/knative/eventing/pkg/provisioners" istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3" "go.uber.org/zap" "k8s.io/apimachinery/pkg/runtime" - _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" "sigs.k8s.io/controller-runtime/pkg/client/config" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/manager" logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" "sigs.k8s.io/controller-runtime/pkg/runtime/signals" - - provisionerController "github.com/knative/eventing/contrib/kafka/pkg/controller" - "github.com/knative/eventing/contrib/kafka/pkg/controller/channel" - eventingv1alpha "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" - "github.com/knative/eventing/pkg/provisioners" + // Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters). + //_ "k8s.io/client-go/plugin/pkg/client/auth/gcp" ) const ( diff --git a/pkg/provisioners/channel_util.go b/pkg/provisioners/channel_util.go index f0705452d44..497dae5adc8 100644 --- a/pkg/provisioners/channel_util.go +++ b/pkg/provisioners/channel_util.go @@ -41,9 +41,14 @@ const ( // AddFinalizerResult is used indicate whether a finalizer was added or already present. type AddFinalizerResult bool +// RemoveFinalizerResult is used to indicate whether a finalizer was found and removed (FinalizerRemoved), or finalizer not found (FinalizerNotFound). 
+type RemoveFinalizerResult bool + const ( - FinalizerAlreadyPresent AddFinalizerResult = false - FinalizerAdded AddFinalizerResult = true + FinalizerAlreadyPresent AddFinalizerResult = false + FinalizerAdded AddFinalizerResult = true + FinalizerRemoved RemoveFinalizerResult = true + FinalizerNotFound RemoveFinalizerResult = false ) // AddFinalizer adds finalizerName to the Object. @@ -57,16 +62,22 @@ func AddFinalizer(o metav1.Object, finalizerName string) AddFinalizerResult { return FinalizerAdded } -func RemoveFinalizer(o metav1.Object, finalizerName string) { +func RemoveFinalizer(o metav1.Object, finalizerName string) RemoveFinalizerResult { + result := FinalizerNotFound finalizers := sets.NewString(o.GetFinalizers()...) - finalizers.Delete(finalizerName) - o.SetFinalizers(finalizers.List()) + if finalizers.Has(finalizerName) { + result = FinalizerRemoved + finalizers.Delete(finalizerName) + o.SetFinalizers(finalizers.List()) + } + return result } -type k8sServiceOption func(*corev1.Service) error +// K8sServiceOption is a functional option that can modify the K8s Service in CreateK8sService +type K8sServiceOption func(*corev1.Service) error // ExternalService is a functional option for CreateK8sService to create a K8s service of type ExternalName. 
-func ExternalService(c *eventingv1alpha1.Channel) k8sServiceOption { +func ExternalService(c *eventingv1alpha1.Channel) K8sServiceOption { return func(svc *corev1.Service) error { svc.Spec = corev1.ServiceSpec{ Type: corev1.ServiceTypeExternalName, @@ -76,7 +87,7 @@ func ExternalService(c *eventingv1alpha1.Channel) k8sServiceOption { } } -func CreateK8sService(ctx context.Context, client runtimeClient.Client, c *eventingv1alpha1.Channel, opts ...k8sServiceOption) (*corev1.Service, error) { +func CreateK8sService(ctx context.Context, client runtimeClient.Client, c *eventingv1alpha1.Channel, opts ...K8sServiceOption) (*corev1.Service, error) { getSvc := func() (*corev1.Service, error) { return getK8sService(ctx, client, c) } @@ -269,7 +280,7 @@ func UpdateChannel(ctx context.Context, client runtimeClient.Client, u *eventing // newK8sService creates a new Service for a Channel resource. It also sets the appropriate // OwnerReferences on the resource so handleObject can discover the Channel resource that 'owns' it. // As well as being garbage collected when the Channel is deleted. 
-func newK8sService(c *eventingv1alpha1.Channel, opts ...k8sServiceOption) (*corev1.Service, error) { +func newK8sService(c *eventingv1alpha1.Channel, opts ...K8sServiceOption) (*corev1.Service, error) { // Add annotations svc := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ diff --git a/pkg/provisioners/channel_util_test.go b/pkg/provisioners/channel_util_test.go index 4f89ffb7b56..848dda5d8f3 100644 --- a/pkg/provisioners/channel_util_test.go +++ b/pkg/provisioners/channel_util_test.go @@ -404,6 +404,35 @@ func TestAddFinalizer(t *testing.T) { } } +func TestRemoveFinalizer(t *testing.T) { + testCases := map[string]struct { + expected RemoveFinalizerResult + }{ + "Finalizer not found": { + expected: false, + }, + "Finalizer removed successfully": { + expected: true, + }, + } + finalizer := "test-finalizer" + for n, tc := range testCases { + t.Run(n, func(t *testing.T) { + c := getNewChannel() + if tc.expected { + c.Finalizers = []string{finalizer} + } else { + c.Finalizers = []string{} + } + actual := RemoveFinalizer(c, finalizer) + + if diff := cmp.Diff(actual, tc.expected); diff != "" { + t.Errorf("unexpected error (-want, +got) = %v", diff) + } + }) + } +} + func TestChannelNames(t *testing.T) { testCases := []struct { Name string diff --git a/pkg/provisioners/inmemory/channel/reconcile.go b/pkg/provisioners/inmemory/channel/reconcile.go index 630f22b4d6b..5d5f8392a82 100644 --- a/pkg/provisioners/inmemory/channel/reconcile.go +++ b/pkg/provisioners/inmemory/channel/reconcile.go @@ -85,17 +85,13 @@ func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, err } logger.Info("Reconciling Channel") - // Finalizer needs to be removed (even though no finalizers are added) main back compat - // with v0.5 in which a finalzier was added. 
Or else channels will not get deleted after upgrading to 0.6 - // TODO: Remove this entire if block in v0.7+ - if c.DeletionTimestamp != nil { - // K8s garbage collection will delete the K8s service and VirtualService for this channel. - // We use a finalizer to ensure the channel config has been synced. - util.RemoveFinalizer(c, finalizerName) + // Finalizer needs to be removed (even though no finalizers are added) to maintain backwards compatibility + // with v0.5 in which a finalzier was added. Or else channels will not get deleted after upgrading to 0.6+ + if result := util.RemoveFinalizer(c, finalizerName); result == util.FinalizerRemoved { r.client.Update(ctx, c) - logger.Info("Channel reconciled") - r.recorder.Eventf(c, corev1.EventTypeNormal, channelReconciled, "Channel reconciled: %q", c.Name) - return reconcile.Result{}, nil + logger.Info("Channel reconciled. Finalizer Removed") + r.recorder.Eventf(c, corev1.EventTypeNormal, channelReconciled, "Channel reconciled: %q. Finalizer removed.", c.Name) + return reconcile.Result{Requeue: true}, nil } err = r.reconcile(ctx, c) diff --git a/pkg/provisioners/inmemory/channel/reconcile_test.go b/pkg/provisioners/inmemory/channel/reconcile_test.go index 2978419df42..0b221854a41 100644 --- a/pkg/provisioners/inmemory/channel/reconcile_test.go +++ b/pkg/provisioners/inmemory/channel/reconcile_test.go @@ -22,6 +22,8 @@ import ( "fmt" "testing" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + eventingduck "github.com/knative/eventing/pkg/apis/duck/v1alpha1" eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" util "github.com/knative/eventing/pkg/provisioners" @@ -245,16 +247,17 @@ func TestReconcile(t *testing.T) { }, }, { - Name: "Channel deleted - finalizer removed", + Name: "Channel has finalizer (to test back compat with version <= 0.5, when finalizers were added", InitialState: []runtime.Object{ - makeDeletingChannel(), + makeChannelWithFinalizer(), }, WantPresent: []runtime.Object{ - 
makeDeletingChannelWithoutFinalizer(), + makeChannel(), }, WantEvent: []corev1.Event{ events[channelReconciled], }, + WantResult: reconcile.Result{Requeue: true}, }, { Name: "K8s service get fails", @@ -402,18 +405,6 @@ func makeChannelWithFinalizer() *eventingv1alpha1.Channel { return c } -func makeDeletingChannel() *eventingv1alpha1.Channel { - c := makeChannelWithFinalizer() - c.DeletionTimestamp = &deletionTime - return c -} - -func makeDeletingChannelWithoutFinalizer() *eventingv1alpha1.Channel { - c := makeDeletingChannel() - c.Finalizers = nil - return c -} - func makeK8sService(pn ...string) *corev1.Service { return &corev1.Service{ TypeMeta: metav1.TypeMeta{ diff --git a/pkg/provisioners/inmemory/controller/main.go b/pkg/provisioners/inmemory/controller/main.go index fbbdd262dff..577bdc948ce 100644 --- a/pkg/provisioners/inmemory/controller/main.go +++ b/pkg/provisioners/inmemory/controller/main.go @@ -29,7 +29,7 @@ import ( "go.uber.org/zap" "sigs.k8s.io/controller-runtime/pkg/client/config" "sigs.k8s.io/controller-runtime/pkg/manager" - // uncomment this line to debug in GKE from local machine + // Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters). 
//_ "k8s.io/client-go/plugin/pkg/client/auth/gcp" ) From 16a6ffc29389e54f1f80c7ff89b9a23fb32bd6d6 Mon Sep 17 00:00:00 2001 From: Akash Verenkar Date: Mon, 15 Apr 2019 14:13:34 -0700 Subject: [PATCH 14/26] Updates based on PR comments --- cmd/broker/ingress/main.go | 2 +- cmd/controller/main.go | 2 +- cmd/fanoutsidecar/main.go | 3 +-- contrib/kafka/cmd/controller/main.go | 2 +- contrib/kafka/main.go | 2 +- pkg/provisioners/inmemory/controller/main.go | 2 +- 6 files changed, 6 insertions(+), 7 deletions(-) diff --git a/cmd/broker/ingress/main.go b/cmd/broker/ingress/main.go index 4e1ec764f55..5317d44320c 100644 --- a/cmd/broker/ingress/main.go +++ b/cmd/broker/ingress/main.go @@ -46,7 +46,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/manager" crlog "sigs.k8s.io/controller-runtime/pkg/runtime/log" // Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters). - //_ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + // _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" ) var ( diff --git a/cmd/controller/main.go b/cmd/controller/main.go index 36d1653a0da..82851fc1bce 100644 --- a/cmd/controller/main.go +++ b/cmd/controller/main.go @@ -46,7 +46,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/manager" logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" // Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters). - //_ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + // _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" ) const ( diff --git a/cmd/fanoutsidecar/main.go b/cmd/fanoutsidecar/main.go index 5e2fd40c6c6..24d793e07eb 100644 --- a/cmd/fanoutsidecar/main.go +++ b/cmd/fanoutsidecar/main.go @@ -41,8 +41,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/runtime/signals" // Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters). 
- //_ "k8s.io/client-go/plugin/pkg/client/auth/gcp" -) + // _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" ) var ( diff --git a/contrib/kafka/cmd/controller/main.go b/contrib/kafka/cmd/controller/main.go index 5f57b0165de..375361f4af3 100644 --- a/contrib/kafka/cmd/controller/main.go +++ b/contrib/kafka/cmd/controller/main.go @@ -17,7 +17,7 @@ import ( logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" "sigs.k8s.io/controller-runtime/pkg/runtime/signals" // Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters). - //_ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + // _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" ) // SchemeFunc adds types to a Scheme. diff --git a/contrib/kafka/main.go b/contrib/kafka/main.go index ec0a9067282..ed98481c20b 100644 --- a/contrib/kafka/main.go +++ b/contrib/kafka/main.go @@ -17,7 +17,7 @@ import ( logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" "sigs.k8s.io/controller-runtime/pkg/runtime/signals" // Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters). - //_ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + // _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" ) const ( diff --git a/pkg/provisioners/inmemory/controller/main.go b/pkg/provisioners/inmemory/controller/main.go index 577bdc948ce..2b09c992b4f 100644 --- a/pkg/provisioners/inmemory/controller/main.go +++ b/pkg/provisioners/inmemory/controller/main.go @@ -30,7 +30,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/config" "sigs.k8s.io/controller-runtime/pkg/manager" // Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters). 
- //_ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + // _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" ) func main() { From 67611dc715f72e199330b2c76c5a4d10a7ed920f Mon Sep 17 00:00:00 2001 From: Akash Verenkar Date: Mon, 15 Apr 2019 14:25:56 -0700 Subject: [PATCH 15/26] Fixed UTs --- cmd/fanoutsidecar/main.go | 37 ++++++++++++++++++++----------------- 1 file changed, 20 insertions(+), 17 deletions(-) diff --git a/cmd/fanoutsidecar/main.go b/cmd/fanoutsidecar/main.go index 24d793e07eb..370289ffb5e 100644 --- a/cmd/fanoutsidecar/main.go +++ b/cmd/fanoutsidecar/main.go @@ -35,6 +35,7 @@ import ( "github.com/knative/eventing/pkg/sidecar/swappable" "go.uber.org/zap" "go.uber.org/zap/zapcore" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/config" @@ -155,24 +156,26 @@ func updateChannelConfig(updateConfig swappable.UpdateConfig) channelwatcher.Wat func listAllChannels(ctx context.Context, c client.Client) ([]v1alpha1.Channel, error) { channels := make([]v1alpha1.Channel, 0) - cl := &v1alpha1.ChannelList{} - opts := &client.ListOptions{ - // Set Raw because if we need to get more than one page, then we will put the continue token - // into opts.Raw.Continue. - Raw: &metav1.ListOptions{}, - } - if err := c.List(ctx, opts, cl); err != nil { - return nil, err - } - for _, c := range cl.Items { - if c.Status.IsReady() && shouldWatch(&c) { - channels = append(channels, c) + for { + cl := &v1alpha1.ChannelList{} + opts := &client.ListOptions{ + // Set Raw because if we need to get more than one page, then we will put the continue token + // into opts.Raw.Continue. 
+ Raw: &metav1.ListOptions{}, + } + if err := c.List(ctx, opts, cl); err != nil { + return nil, err + } + for _, c := range cl.Items { + if c.Status.IsReady() && shouldWatch(&c) { + channels = append(channels, c) + } + } + if cl.Continue != "" { + opts.Raw.Continue = cl.Continue + } else { + return channels, nil } - } - if cl.Continue != "" { - opts.Raw.Continue = cl.Continue - } else { - return channels, nil } } From 2cc8525d17a627f2fde061564420cc1320b6e7cc Mon Sep 17 00:00:00 2001 From: Akash Verenkar Date: Mon, 15 Apr 2019 14:50:51 -0700 Subject: [PATCH 16/26] Updated VENDOR_LICENSE --- third_party/VENDOR-LICENSE | 34 ---------------------------------- 1 file changed, 34 deletions(-) diff --git a/third_party/VENDOR-LICENSE b/third_party/VENDOR-LICENSE index e8d7037e247..697031fbfb7 100644 --- a/third_party/VENDOR-LICENSE +++ b/third_party/VENDOR-LICENSE @@ -627,40 +627,6 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -=========================================================== -Import: github.com/knative/eventing/vendor/github.com/fsnotify/fsnotify - -Copyright (c) 2012 The Go Authors. All rights reserved. -Copyright (c) 2012 fsnotify Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - - =========================================================== Import: github.com/knative/eventing/vendor/github.com/ghodss/yaml From 3b3f16f5b332c4a43adfa532ffb9a1ff7ad51d9b Mon Sep 17 00:00:00 2001 From: Akash Verenkar Date: Mon, 15 Apr 2019 15:46:44 -0700 Subject: [PATCH 17/26] WIP. 
Update fanout sidecar --- cmd/fanoutsidecar/main.go | 44 +------------------ pkg/channelwatcher/channel_watcher.go | 35 ++++++++++----- pkg/provisioners/channel_util.go | 8 ++-- .../inmemory/channel/reconcile.go | 1 - 4 files changed, 29 insertions(+), 59 deletions(-) diff --git a/cmd/fanoutsidecar/main.go b/cmd/fanoutsidecar/main.go index 5ca9120c09d..93c1d161320 100644 --- a/cmd/fanoutsidecar/main.go +++ b/cmd/fanoutsidecar/main.go @@ -29,14 +29,9 @@ import ( "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" "github.com/knative/eventing/pkg/channelwatcher" - "github.com/knative/eventing/pkg/logging" - "github.com/knative/eventing/pkg/sidecar/multichannelfanout" "github.com/knative/eventing/pkg/sidecar/swappable" "go.uber.org/zap" "go.uber.org/zap/zapcore" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/config" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/runtime/signals" @@ -136,48 +131,11 @@ func setupChannelWatcher(logger *zap.Logger, configUpdated swappable.UpdateConfi logger.Error("Error while adding eventing scheme to manager.", zap.Error(err)) return nil, err } - channelwatcher.New(mgr, logger, updateChannelConfig(configUpdated)) + channelwatcher.New(mgr, logger, channelwatcher.UpdateChannelConfigWatchHandler(configUpdated, shouldWatch)) return mgr, nil } -func updateChannelConfig(updateConfig swappable.UpdateConfig) channelwatcher.WatchHandlerFunc { - return func(ctx context.Context, c client.Client, chanNamespacedName types.NamespacedName) error { - channels, err := listAllChannels(ctx, c) - if err != nil { - logging.FromContext(ctx).Info("Unable to list channels", zap.Error(err)) - return err - } - config := multichannelfanout.NewConfigFromChannels(channels) - return updateConfig(config) - } -} - -func listAllChannels(ctx context.Context, c client.Client) ([]v1alpha1.Channel, error) { - 
channels := make([]v1alpha1.Channel, 0) - for { - cl := &v1alpha1.ChannelList{} - opts := &client.ListOptions{ - // Set Raw because if we need to get more than one page, then we will put the continue token - // into opts.Raw.Continue. - Raw: &metav1.ListOptions{}, - } - if err := c.List(ctx, opts, cl); err != nil { - return nil, err - } - for _, c := range cl.Items { - if c.Status.IsReady() && shouldWatch(&c) { - channels = append(channels, c) - } - } - if cl.Continue != "" { - opts.Raw.Continue = cl.Continue - } else { - return channels, nil - } - } -} - func shouldWatch(ch *v1alpha1.Channel) bool { if ch.Spec.Provisioner != nil && ch.Spec.Provisioner.Namespace == "" { for _, v := range channelProvisioners { diff --git a/pkg/channelwatcher/channel_watcher.go b/pkg/channelwatcher/channel_watcher.go index 8c687d01690..e81557543a4 100644 --- a/pkg/channelwatcher/channel_watcher.go +++ b/pkg/channelwatcher/channel_watcher.go @@ -3,18 +3,18 @@ package channelwatcher import ( "context" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" "github.com/knative/eventing/pkg/logging" "github.com/knative/eventing/pkg/sidecar/multichannelfanout" "github.com/knative/eventing/pkg/sidecar/swappable" "go.uber.org/zap" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" ) @@ -86,14 +86,25 @@ func UpdateChannelConfigWatchHandler(updateConfig swappable.UpdateConfig, should // ListAllChannels queries client and gets list of all channels for which shouldWatch returns true. 
func listAllChannels(ctx context.Context, c client.Client, shouldWatch ShouldWatchFunc) ([]v1alpha1.Channel, error) { channels := make([]v1alpha1.Channel, 0) - cl := &v1alpha1.ChannelList{} - if err := c.List(ctx, &client.ListOptions{}, cl); err != nil { - return nil, err - } - for _, c := range cl.Items { - if c.Status.IsReady() && shouldWatch(&c) { - channels = append(channels, c) + for { + cl := &v1alpha1.ChannelList{} + opts := &client.ListOptions{ + // Set Raw because if we need to get more than one page, then we will put the continue token + // into opts.Raw.Continue. + Raw: &metav1.ListOptions{}, + } + if err := c.List(ctx, opts, cl); err != nil { + return nil, err + } + for _, c := range cl.Items { + if c.Status.IsReady() && shouldWatch(&c) { + channels = append(channels, c) + } + } + if cl.Continue != "" { + opts.Raw.Continue = cl.Continue + } else { + return channels, nil } } - return channels, nil } diff --git a/pkg/provisioners/channel_util.go b/pkg/provisioners/channel_util.go index 497dae5adc8..a252465d37e 100644 --- a/pkg/provisioners/channel_util.go +++ b/pkg/provisioners/channel_util.go @@ -62,15 +62,17 @@ func AddFinalizer(o metav1.Object, finalizerName string) AddFinalizerResult { return FinalizerAdded } +// RemoveFinalizer removes the finalizer(finalizerName) from the object(o) if the finalizer is present. +// Returns: - FinalizerRemoved, if the finalizer was found and removed. +// - FinalizerNotFound, if the finalizer was not found. func RemoveFinalizer(o metav1.Object, finalizerName string) RemoveFinalizerResult { - result := FinalizerNotFound finalizers := sets.NewString(o.GetFinalizers()...) 
if finalizers.Has(finalizerName) { - result = FinalizerRemoved finalizers.Delete(finalizerName) o.SetFinalizers(finalizers.List()) + return FinalizerRemoved } - return result + return FinalizerNotFound } // K8sServiceOption is a functional option that can modify the K8s Service in CreateK8sService diff --git a/pkg/provisioners/inmemory/channel/reconcile.go b/pkg/provisioners/inmemory/channel/reconcile.go index 8d1368b6bf1..5d5f8392a82 100644 --- a/pkg/provisioners/inmemory/channel/reconcile.go +++ b/pkg/provisioners/inmemory/channel/reconcile.go @@ -109,7 +109,6 @@ func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, err r.recorder.Eventf(c, corev1.EventTypeWarning, channelUpdateStatusFailed, "Failed to update Channel's status: %v", err) return reconcile.Result{}, updateStatusErr } - return reconcile.Result{}, err } From f065d2292ca4cd3a691afc3431ee9cbc69ce6408 Mon Sep 17 00:00:00 2001 From: Akash Verenkar Date: Mon, 15 Apr 2019 15:54:34 -0700 Subject: [PATCH 18/26] Merge from upstream master --- pkg/channelwatcher/channel_watcher.go | 26 +------------------------- 1 file changed, 1 insertion(+), 25 deletions(-) diff --git a/pkg/channelwatcher/channel_watcher.go b/pkg/channelwatcher/channel_watcher.go index fdbcb539432..98f53314391 100644 --- a/pkg/channelwatcher/channel_watcher.go +++ b/pkg/channelwatcher/channel_watcher.go @@ -3,7 +3,6 @@ package channelwatcher import ( "context" -<<<<<<< HEAD "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" "github.com/knative/eventing/pkg/logging" "github.com/knative/eventing/pkg/sidecar/multichannelfanout" @@ -19,23 +18,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/source" ) -======= - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - - "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" - "github.com/knative/eventing/pkg/logging" - "go.uber.org/zap" - "sigs.k8s.io/controller-runtime/pkg/client" - 
"sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/source" -) - -type WatchHandlerFunc func(context.Context, client.Client, types.NamespacedName) error - ->>>>>>> f8317dd0ead16253fcea1a61a749bb228708f117 type reconciler struct { client client.Client logger *zap.Logger @@ -52,10 +34,7 @@ func (r *reconciler) Reconcile(req reconcile.Request) (reconcile.Result, error) return reconcile.Result{}, nil } -<<<<<<< HEAD // New creates a new instance of Channel Watcher that watches channels and calls the watchHandler on add, update, delete and generic event -======= ->>>>>>> f8317dd0ead16253fcea1a61a749bb228708f117 func New(mgr manager.Manager, logger *zap.Logger, watchHandler WatchHandlerFunc) error { c, err := controller.New("ChannelWatcher", mgr, controller.Options{ Reconciler: &reconciler{ @@ -79,7 +58,6 @@ func New(mgr manager.Manager, logger *zap.Logger, watchHandler WatchHandlerFunc) } return nil } -<<<<<<< HEAD // WatchHandlerFunc is called whenever an add, update, delete or generic event is triggers on a channel watch type WatchHandlerFunc func(context.Context, client.Client, types.NamespacedName) error @@ -105,7 +83,7 @@ func UpdateChannelConfigWatchHandler(updateConfig swappable.UpdateConfig, should } } -// ListAllChannels queries client and gets list of all channels for which shouldWatch returns true. +// listAllChannels queries client and gets list of all channels for which shouldWatch returns true. 
func listAllChannels(ctx context.Context, c client.Client, shouldWatch ShouldWatchFunc) ([]v1alpha1.Channel, error) { channels := make([]v1alpha1.Channel, 0) for { @@ -130,5 +108,3 @@ func listAllChannels(ctx context.Context, c client.Client, shouldWatch ShouldWat } } } -======= ->>>>>>> f8317dd0ead16253fcea1a61a749bb228708f117 From a645dcea4231ead306485126d8f2afe147dd787b Mon Sep 17 00:00:00 2001 From: Akash Verenkar Date: Mon, 15 Apr 2019 17:31:19 -0700 Subject: [PATCH 19/26] UTs pass, ITs passed. COde ready for PR --- cmd/fanoutsidecar/main.go | 2 +- contrib/kafka/cmd/dispatcher/main.go | 2 +- contrib/kafka/config/kafka.yaml | 2 +- contrib/kafka/pkg/dispatcher/dispatcher.go | 10 ++++--- .../kafka/pkg/dispatcher/dispatcher_test.go | 2 +- pkg/channelwatcher/channel_watcher.go | 6 ++-- pkg/channelwatcher/channel_watcher_test.go | 4 +-- pkg/provisioners/channel_util.go | 2 +- pkg/provisioners/message_receiver.go | 30 +++++++++---------- pkg/reconciler/testing/table.go | 6 ++-- 10 files changed, 33 insertions(+), 33 deletions(-) diff --git a/cmd/fanoutsidecar/main.go b/cmd/fanoutsidecar/main.go index 93c1d161320..fc1927d0f01 100644 --- a/cmd/fanoutsidecar/main.go +++ b/cmd/fanoutsidecar/main.go @@ -131,7 +131,7 @@ func setupChannelWatcher(logger *zap.Logger, configUpdated swappable.UpdateConfi logger.Error("Error while adding eventing scheme to manager.", zap.Error(err)) return nil, err } - channelwatcher.New(mgr, logger, channelwatcher.UpdateChannelConfigWatchHandler(configUpdated, shouldWatch)) + channelwatcher.New(mgr, logger, channelwatcher.UpdateConfigWatchHandler(configUpdated, shouldWatch)) return mgr, nil } diff --git a/contrib/kafka/cmd/dispatcher/main.go b/contrib/kafka/cmd/dispatcher/main.go index 7f15663181c..1ab05718539 100644 --- a/contrib/kafka/cmd/dispatcher/main.go +++ b/contrib/kafka/cmd/dispatcher/main.go @@ -56,7 +56,7 @@ func main() { } v1alpha1.AddToScheme(mgr.GetScheme()) - channelwatcher.New(mgr, logger, 
channelwatcher.UpdateChannelConfigWatchHandler(kafkaDispatcher.UpdateConfig, shouldWatch)) + channelwatcher.New(mgr, logger, channelwatcher.UpdateConfigWatchHandler(kafkaDispatcher.UpdateConfig, shouldWatch)) if err != nil { logger.Fatal("Unable to create channel watcher.", zap.Error(err)) } diff --git a/contrib/kafka/config/kafka.yaml b/contrib/kafka/config/kafka.yaml index 82298b56519..c58fb14296b 100644 --- a/contrib/kafka/config/kafka.yaml +++ b/contrib/kafka/config/kafka.yaml @@ -108,7 +108,7 @@ metadata: namespace: knative-eventing data: # Broker URL's for the provisioner. Replace this with the URL's for your kafka cluster. - bootstrap_servers: my-cluster-kafka-bootstrap.kafka:9092 + bootstrap_servers: kafkabroker.kafka:9092 # Consumer mode to dispatch events from different partitions in parallel. # By default(multiplex), partitions are multiplexed with a single go channel. diff --git a/contrib/kafka/pkg/dispatcher/dispatcher.go b/contrib/kafka/pkg/dispatcher/dispatcher.go index bfe4790d062..9c84f6f4d4a 100644 --- a/contrib/kafka/pkg/dispatcher/dispatcher.go +++ b/contrib/kafka/pkg/dispatcher/dispatcher.go @@ -34,6 +34,7 @@ import ( ) type KafkaDispatcher struct { + // TODO: config doesn't have to be atomic as it is read an updated using updateLock. config atomic.Value hostToChannelMap atomic.Value updateLock sync.Mutex @@ -42,8 +43,10 @@ type KafkaDispatcher struct { dispatcher *provisioners.MessageDispatcher kafkaAsyncProducer sarama.AsyncProducer - kafkaConsumers map[provisioners.ChannelReference]map[subscription]KafkaConsumer - kafkaCluster KafkaCluster + // TODO: kafkaConsumer map should probably be atomic as it is updated and read on separate go routines with no syncchronization. 
+ // Verify if this is an issue and fix accordignly + kafkaConsumers map[provisioners.ChannelReference]map[subscription]KafkaConsumer + kafkaCluster KafkaCluster logger *zap.Logger } @@ -140,7 +143,6 @@ func (d *KafkaDispatcher) UpdateConfig(config *multichannelfanout.Config) error // Update the config so that it can be used for comparison during next sync d.setConfig(config) - } return nil } @@ -150,7 +152,7 @@ func createHostToChannelMap(config *multichannelfanout.Config) (map[string]provi for _, cConfig := range config.ChannelConfigs { if cr, ok := hcMap[cConfig.HostName]; ok { return nil, fmt.Errorf( - "Duplicate hostName found. HostName:%s, channel:%s.%s, channel:%s.%s", + "Duplicate hostName found. Each channel must have a unique host header. HostName:%s, channel:%s.%s, channel:%s.%s", cConfig.HostName, cConfig.Namespace, cConfig.Name, diff --git a/contrib/kafka/pkg/dispatcher/dispatcher_test.go b/contrib/kafka/pkg/dispatcher/dispatcher_test.go index 9d0a32222fb..9e697e83262 100644 --- a/contrib/kafka/pkg/dispatcher/dispatcher_test.go +++ b/contrib/kafka/pkg/dispatcher/dispatcher_test.go @@ -406,7 +406,7 @@ func TestDispatcher_UpdateConfig(t *testing.T) { }, }, }, - createErr: "Duplicate hostName found. HostName:a.b.c.d, channel:default.test-channel-2, channel:default.test-channel-1", + createErr: "Duplicate hostName found. Each channel must have a unique host header. 
HostName:a.b.c.d, channel:default.test-channel-2, channel:default.test-channel-1", oldHostToChanMap: map[string]provisioners.ChannelReference{}, }, } diff --git a/pkg/channelwatcher/channel_watcher.go b/pkg/channelwatcher/channel_watcher.go index 98f53314391..8837ffc8747 100644 --- a/pkg/channelwatcher/channel_watcher.go +++ b/pkg/channelwatcher/channel_watcher.go @@ -59,19 +59,19 @@ func New(mgr manager.Manager, logger *zap.Logger, watchHandler WatchHandlerFunc) return nil } -// WatchHandlerFunc is called whenever an add, update, delete or generic event is triggers on a channel watch +// WatchHandlerFunc is called whenever an add, update, delete or generic event is triggered on a channel type WatchHandlerFunc func(context.Context, client.Client, types.NamespacedName) error // ShouldWatchFunc is called while returning list of channels. // Channels are included in the list if the return value is true. type ShouldWatchFunc func(ch *v1alpha1.Channel) bool -// UpdateChannelConfigWatchHandler is a special handler that +// UpdateConfigWatchHandler is a special handler that // 1. Lists the channels for which shouldWatch returns true // 2. Creates a multi-channel-fanout-config // 3. 
Calls the updateConfig func with the new multi-channel-fanout-config // This is used by dispatchers or receivers to update their configs by watching channels -func UpdateChannelConfigWatchHandler(updateConfig swappable.UpdateConfig, shouldWatch ShouldWatchFunc) WatchHandlerFunc { +func UpdateConfigWatchHandler(updateConfig swappable.UpdateConfig, shouldWatch ShouldWatchFunc) WatchHandlerFunc { return func(ctx context.Context, c client.Client, chanNamespacedName types.NamespacedName) error { channels, err := listAllChannels(ctx, c, shouldWatch) if err != nil { diff --git a/pkg/channelwatcher/channel_watcher_test.go b/pkg/channelwatcher/channel_watcher_test.go index b3d5395b7ba..56f9f772873 100644 --- a/pkg/channelwatcher/channel_watcher_test.go +++ b/pkg/channelwatcher/channel_watcher_test.go @@ -26,7 +26,7 @@ func init() { _ = v1alpha1.AddToScheme(scheme.Scheme) } -func TestUpdateChannelConfigWatchHandler(t *testing.T) { +func TestUpdateConfigWatchHandler(t *testing.T) { tests := []struct { name string channels []runtime.Object @@ -82,7 +82,7 @@ func TestUpdateChannelConfigWatchHandler(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { actualConfig := ConfigHolder{} - watchHandler := UpdateChannelConfigWatchHandler(updateConfigWrapper(&actualConfig, test.updateConfigError), shouldWatch) + watchHandler := UpdateConfigWatchHandler(updateConfigWrapper(&actualConfig, test.updateConfigError), shouldWatch) mockClient := getClient(test.channels, getClientMocks(test.clientListError)) actualError := watchHandler(context.TODO(), mockClient, types.NamespacedName{}) diff --git a/pkg/provisioners/channel_util.go b/pkg/provisioners/channel_util.go index a252465d37e..d61205c6897 100644 --- a/pkg/provisioners/channel_util.go +++ b/pkg/provisioners/channel_util.go @@ -64,7 +64,7 @@ func AddFinalizer(o metav1.Object, finalizerName string) AddFinalizerResult { // RemoveFinalizer removes the finalizer(finalizerName) from the object(o) if the finalizer 
is present. // Returns: - FinalizerRemoved, if the finalizer was found and removed. -// - FinalizerNotFound, if the finalizer was not found. +// - FinalizerNotFound, if the finalizer was not found. func RemoveFinalizer(o metav1.Object, finalizerName string) RemoveFinalizerResult { finalizers := sets.NewString(o.GetFinalizers()...) if finalizers.Has(finalizerName) { diff --git a/pkg/provisioners/message_receiver.go b/pkg/provisioners/message_receiver.go index b4fceb1022c..4c9bac7f93a 100644 --- a/pkg/provisioners/message_receiver.go +++ b/pkg/provisioners/message_receiver.go @@ -32,7 +32,7 @@ const ( MessageReceiverPort = 8080 ) -// Message receiver receives messages. +// MessageReceiver receives messages. type MessageReceiver struct { receiverFunc func(ChannelReference, *Message) error forwardHeaders sets.String @@ -41,14 +41,16 @@ type MessageReceiver struct { hostToChannelFunc ResolveChannelFromHostFunc } -type receiverOptions func(*MessageReceiver) error +// ReceiverOptions provides functional options to MessageReceiver function +type ReceiverOptions func(*MessageReceiver) error +// ResolveChannelFromHostFunc function enables MessageReceiver to get the Channel Reference from incoming request HostHeader +// before calling receiverFunc type ResolveChannelFromHostFunc func(string) (ChannelReference, error) -// ResolveChannelFromHostHeader is a receiverOption that enables the consumer of the MessageReceiver -// to pass a map[]ChannelReference. 
This map will then be used to to get the ChannelReference -// from httpRequest.Host before calling receiverFunc -func ResolveChannelFromHostHeader(hostToChannelFunc ResolveChannelFromHostFunc) receiverOptions { +// ResolveChannelFromHostHeader is a ReceiverOption for NewMessageReceiver which enables the caller to overwrite the +// default behaviour defined by ParseChannel function +func ResolveChannelFromHostHeader(hostToChannelFunc ResolveChannelFromHostFunc) ReceiverOptions { return func(r *MessageReceiver) error { r.hostToChannelFunc = hostToChannelFunc return nil @@ -57,23 +59,19 @@ func ResolveChannelFromHostHeader(hostToChannelFunc ResolveChannelFromHostFunc) // NewMessageReceiver creates a message receiver passing new messages to the // receiverFunc. -func NewMessageReceiver(receiverFunc func(ChannelReference, *Message) error, logger *zap.SugaredLogger, opts ...receiverOptions) (*MessageReceiver, error) { +func NewMessageReceiver(receiverFunc func(ChannelReference, *Message) error, logger *zap.SugaredLogger, opts ...ReceiverOptions) (*MessageReceiver, error) { receiver := &MessageReceiver{ - receiverFunc: receiverFunc, - forwardHeaders: sets.NewString(forwardHeaders...), - forwardPrefixes: forwardPrefixes, - - logger: logger, + receiverFunc: receiverFunc, + forwardHeaders: sets.NewString(forwardHeaders...), + forwardPrefixes: forwardPrefixes, + hostToChannelFunc: ResolveChannelFromHostFunc(ParseChannel), + logger: logger, } for _, opt := range opts { if err := opt(receiver); err != nil { return nil, err } } - // Default to old behaviour of host = channelName.channelNamespace - if receiver.hostToChannelFunc == nil { - receiver.hostToChannelFunc = ResolveChannelFromHostFunc(ParseChannel) - } return receiver, nil } diff --git a/pkg/reconciler/testing/table.go b/pkg/reconciler/testing/table.go index d26d57a4cf1..9bf2e2334e6 100644 --- a/pkg/reconciler/testing/table.go +++ b/pkg/reconciler/testing/table.go @@ -149,7 +149,7 @@ func (tc *TestCase) 
GetDynamicClient() dynamic.Interface { // GetClient returns the mockClient to use for this test case. func (tc *TestCase) GetClient() *MockClient { - builtObjects := BuildAllObjects(tc.InitialState) + builtObjects := buildAllObjects(tc.InitialState) innerClient := fake.NewFakeClient(builtObjects...) return NewMockClient(innerClient, tc.Mocks) } @@ -224,7 +224,7 @@ func (se stateErrors) Error() string { // to be present after reconciliation. func (tc *TestCase) VerifyWantPresent(c client.Client) error { var errs stateErrors - builtObjects := BuildAllObjects(tc.WantPresent) + builtObjects := buildAllObjects(tc.WantPresent) for _, wp := range builtObjects { o, err := scheme.Scheme.New(wp.GetObjectKind().GroupVersionKind()) if err != nil { @@ -304,7 +304,7 @@ func getEventsAsString(events []corev1.Event) []string { return eventsAsString } -func BuildAllObjects(objs []runtime.Object) []runtime.Object { +func buildAllObjects(objs []runtime.Object) []runtime.Object { builtObjs := []runtime.Object{} for _, obj := range objs { if builder, ok := obj.(Buildable); ok { From 09e4dfa03adc27e7bf516a3bddb5fcd98d2e71e7 Mon Sep 17 00:00:00 2001 From: Akash Verenkar Date: Tue, 16 Apr 2019 14:34:18 -0700 Subject: [PATCH 20/26] Updates based on PR comments --- contrib/kafka/cmd/dispatcher/main.go | 8 +++++--- pkg/channelwatcher/channel_watcher.go | 2 +- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/contrib/kafka/cmd/dispatcher/main.go b/contrib/kafka/cmd/dispatcher/main.go index 1ab05718539..b0d1c2ac286 100644 --- a/contrib/kafka/cmd/dispatcher/main.go +++ b/contrib/kafka/cmd/dispatcher/main.go @@ -55,9 +55,11 @@ func main() { logger.Fatal("Unable to add kafkaDispatcher", zap.Error(err)) } - v1alpha1.AddToScheme(mgr.GetScheme()) - channelwatcher.New(mgr, logger, channelwatcher.UpdateConfigWatchHandler(kafkaDispatcher.UpdateConfig, shouldWatch)) - if err != nil { + if err := v1alpha1.AddToScheme(mgr.GetScheme()); err != nil { + logger.Fatal("Unable to add scheme for 
eventing apis.", zap.Error(err)) + } + + if err := channelwatcher.New(mgr, logger, channelwatcher.UpdateConfigWatchHandler(kafkaDispatcher.UpdateConfig, shouldWatch)); err != nil { logger.Fatal("Unable to create channel watcher.", zap.Error(err)) } diff --git a/pkg/channelwatcher/channel_watcher.go b/pkg/channelwatcher/channel_watcher.go index 8837ffc8747..ff8a7852e65 100644 --- a/pkg/channelwatcher/channel_watcher.go +++ b/pkg/channelwatcher/channel_watcher.go @@ -72,7 +72,7 @@ type ShouldWatchFunc func(ch *v1alpha1.Channel) bool // 3. Calls the updateConfig func with the new multi-channel-fanout-config // This is used by dispatchers or receivers to update their configs by watching channels func UpdateConfigWatchHandler(updateConfig swappable.UpdateConfig, shouldWatch ShouldWatchFunc) WatchHandlerFunc { - return func(ctx context.Context, c client.Client, chanNamespacedName types.NamespacedName) error { + return func(ctx context.Context, c client.Client, _ types.NamespacedName) error { channels, err := listAllChannels(ctx, c, shouldWatch) if err != nil { logging.FromContext(ctx).Info("Unable to list channels", zap.Error(err)) From d1a1bd57d2ba3bb59ee3a048cbcff16e87479cb5 Mon Sep 17 00:00:00 2001 From: Akash Verenkar Date: Wed, 17 Apr 2019 18:03:28 -0700 Subject: [PATCH 21/26] Changes based on PR comments --- .../pkg/dispatcher/receiver/receiver_test.go | 4 +-- contrib/kafka/config/kafka.yaml | 18 ---------- .../pkg/controller/channel/reconcile_test.go | 2 -- contrib/kafka/pkg/dispatcher/dispatcher.go | 22 ++++++------ .../kafka/pkg/dispatcher/dispatcher_test.go | 2 +- pkg/channelwatcher/channel_watcher.go | 36 +++++++------------ pkg/channelwatcher/channel_watcher_test.go | 8 ++--- pkg/provisioners/message_receiver.go | 6 ++-- pkg/sidecar/fanout/fanout_handler_test.go | 6 ++-- pkg/sidecar/multichannelfanout/config.go | 2 +- pkg/sidecar/multichannelfanout/config_test.go | 18 +++------- 11 files changed, 41 insertions(+), 83 deletions(-) diff --git 
a/contrib/gcppubsub/pkg/dispatcher/receiver/receiver_test.go b/contrib/gcppubsub/pkg/dispatcher/receiver/receiver_test.go index d6d69db23b6..c4789c2c9ed 100644 --- a/contrib/gcppubsub/pkg/dispatcher/receiver/receiver_test.go +++ b/contrib/gcppubsub/pkg/dispatcher/receiver/receiver_test.go @@ -134,14 +134,14 @@ func TestReceiver(t *testing.T) { fake.NewFakeClient(tc.initialState...), fakepubsub.Creator(tc.pubSubData)) if err != nil { - t.Errorf("Error when creating a New receiver. Error:%s", err) + t.Fatalf("Error when creating a New receiver. Error:%s", err) } resp := httptest.NewRecorder() req := httptest.NewRequest("POST", "/", strings.NewReader(validMessage)) req.Host = "test-channel.test-namespace.channels." + utils.GetClusterDomainName() receiver, err := mr.newMessageReceiver() if err != nil { - t.Errorf("Error when creating a new message receiver. Error:%s", err) + t.Fatalf("Error when creating a new message receiver. Error:%s", err) } receiver.HandleRequest(resp, req) if tc.expectedErr { diff --git a/contrib/kafka/config/kafka.yaml b/contrib/kafka/config/kafka.yaml index c58fb14296b..990c7aa20cc 100644 --- a/contrib/kafka/config/kafka.yaml +++ b/contrib/kafka/config/kafka.yaml @@ -67,24 +67,6 @@ rules: - kafka-channel-dispatcher verbs: - update - - apiGroups: - - networking.istio.io - resources: - - virtualservices - verbs: - - get - - list - - watch - - create - - update - - apiGroups: - - "" # Core API Group. 
- resources: - - events - verbs: - - create - - patch - - update --- apiVersion: rbac.authorization.k8s.io/v1 diff --git a/contrib/kafka/pkg/controller/channel/reconcile_test.go b/contrib/kafka/pkg/controller/channel/reconcile_test.go index 33fad32efb3..02836e06a54 100644 --- a/contrib/kafka/pkg/controller/channel/reconcile_test.go +++ b/contrib/kafka/pkg/controller/channel/reconcile_test.go @@ -34,7 +34,6 @@ import ( controllertesting "github.com/knative/eventing/pkg/reconciler/testing" "github.com/knative/eventing/pkg/utils" duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1" - istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3" "github.com/knative/pkg/system" _ "github.com/knative/pkg/system/testing" corev1 "k8s.io/api/core/v1" @@ -72,7 +71,6 @@ var ( func init() { // Add types to scheme eventingv1alpha1.AddToScheme(scheme.Scheme) - istiov1alpha3.AddToScheme(scheme.Scheme) } var mockFetchError = controllertesting.Mocks{ diff --git a/contrib/kafka/pkg/dispatcher/dispatcher.go b/contrib/kafka/pkg/dispatcher/dispatcher.go index 9c84f6f4d4a..bef6061a75f 100644 --- a/contrib/kafka/pkg/dispatcher/dispatcher.go +++ b/contrib/kafka/pkg/dispatcher/dispatcher.go @@ -34,7 +34,7 @@ import ( ) type KafkaDispatcher struct { - // TODO: config doesn't have to be atomic as it is read an updated using updateLock. + // TODO: config doesn't have to be atomic as it is read and updated using updateLock. config atomic.Value hostToChannelMap atomic.Value updateLock sync.Mutex @@ -43,10 +43,8 @@ type KafkaDispatcher struct { dispatcher *provisioners.MessageDispatcher kafkaAsyncProducer sarama.AsyncProducer - // TODO: kafkaConsumer map should probably be atomic as it is updated and read on separate go routines with no syncchronization. 
- // Verify if this is an issue and fix accordignly - kafkaConsumers map[provisioners.ChannelReference]map[subscription]KafkaConsumer - kafkaCluster KafkaCluster + kafkaConsumers map[provisioners.ChannelReference]map[subscription]KafkaConsumer + kafkaCluster KafkaCluster logger *zap.Logger } @@ -91,7 +89,7 @@ type subscription struct { // ConfigDiff diffs the new config with the existing config. If there are no differences, then the // empty string is returned. If there are differences, then a non-empty string is returned // describing the differences. -func (d *KafkaDispatcher) ConfigDiff(updated *multichannelfanout.Config) string { +func (d *KafkaDispatcher) configDiff(updated *multichannelfanout.Config) string { return cmp.Diff(d.getConfig(), updated) } @@ -103,7 +101,7 @@ func (d *KafkaDispatcher) UpdateConfig(config *multichannelfanout.Config) error d.updateLock.Lock() defer d.updateLock.Unlock() - if diff := d.ConfigDiff(config); diff != "" { + if diff := d.configDiff(config); diff != "" { d.logger.Info("Updating config (-old +new)", zap.String("diff", diff)) newSubs := make(map[subscription]bool) @@ -148,11 +146,11 @@ func (d *KafkaDispatcher) UpdateConfig(config *multichannelfanout.Config) error } func createHostToChannelMap(config *multichannelfanout.Config) (map[string]provisioners.ChannelReference, error) { - hcMap := make(map[string]provisioners.ChannelReference) + hcMap := make(map[string]provisioners.ChannelReference, len(config.ChannelConfigs)) for _, cConfig := range config.ChannelConfigs { if cr, ok := hcMap[cConfig.HostName]; ok { return nil, fmt.Errorf( - "Duplicate hostName found. Each channel must have a unique host header. HostName:%s, channel:%s.%s, channel:%s.%s", + "duplicate hostName found. Each channel must have a unique host header. 
HostName:%s, channel:%s.%s, channel:%s.%s", cConfig.HostName, cConfig.Namespace, cConfig.Name, @@ -190,6 +188,8 @@ func (d *KafkaDispatcher) Start(stopCh <-chan struct{}) error { return d.receiver.Start(stopCh) } +// subscribe reads kafkaConsumers which gets updated in UpdateConfig in a separate go-routine. +// subscribe must be called under updateLock. func (d *KafkaDispatcher) subscribe(channelRef provisioners.ChannelReference, sub subscription) error { d.logger.Info("Subscribing", zap.Any("channelRef", channelRef), zap.Any("subscription", sub)) @@ -262,6 +262,8 @@ func (d *KafkaDispatcher) dispatch(channelRef provisioners.ChannelReference, sub return err } +// unsubscribe reads kafkaConsumers which gets updated in UpdateConfig in a separate go-routine. +// unsubscribe must be called under updateLock. func (d *KafkaDispatcher) unsubscribe(channel provisioners.ChannelReference, sub subscription) error { d.logger.Info("Unsubscribing from channel", zap.Any("channel", channel), zap.Any("subscription", sub)) if consumer, ok := d.kafkaConsumers[channel][sub]; ok { @@ -336,7 +338,7 @@ func (d *KafkaDispatcher) getChannelReferenceFromHost(host string) (provisioners chMap := d.getHostToChannelMap() cr, ok := chMap[host] if !ok { - return cr, fmt.Errorf("Invalid HostName:%s. HostName not found in ConfigMap for any Channel", host) + return cr, fmt.Errorf("invalid Hostname:%s. Hostname not found in ConfigMap for any Channel", host) } return cr, nil } diff --git a/contrib/kafka/pkg/dispatcher/dispatcher_test.go b/contrib/kafka/pkg/dispatcher/dispatcher_test.go index 9e697e83262..8f0e4f39169 100644 --- a/contrib/kafka/pkg/dispatcher/dispatcher_test.go +++ b/contrib/kafka/pkg/dispatcher/dispatcher_test.go @@ -406,7 +406,7 @@ func TestDispatcher_UpdateConfig(t *testing.T) { }, }, }, - createErr: "Duplicate hostName found. Each channel must have a unique host header. 
HostName:a.b.c.d, channel:default.test-channel-2, channel:default.test-channel-1", + createErr: "duplicate hostName found. Each channel must have a unique host header. HostName:a.b.c.d, channel:default.test-channel-2, channel:default.test-channel-1", oldHostToChanMap: map[string]provisioners.ChannelReference{}, }, } diff --git a/pkg/channelwatcher/channel_watcher.go b/pkg/channelwatcher/channel_watcher.go index ff8a7852e65..a2822b8df45 100644 --- a/pkg/channelwatcher/channel_watcher.go +++ b/pkg/channelwatcher/channel_watcher.go @@ -8,7 +8,6 @@ import ( "github.com/knative/eventing/pkg/sidecar/multichannelfanout" "github.com/knative/eventing/pkg/sidecar/swappable" "go.uber.org/zap" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -67,10 +66,10 @@ type WatchHandlerFunc func(context.Context, client.Client, types.NamespacedName) type ShouldWatchFunc func(ch *v1alpha1.Channel) bool // UpdateConfigWatchHandler is a special handler that -// 1. Lists the channels for which shouldWatch returns true -// 2. Creates a multi-channel-fanout-config -// 3. Calls the updateConfig func with the new multi-channel-fanout-config -// This is used by dispatchers or receivers to update their configs by watching channels +// 1. Lists the channels for which shouldWatch returns true. +// 2. Creates a multi-channel-fanout-config. +// 3. Calls the updateConfig func with the new multi-channel-fanout-config. +// This is used by dispatchers or receivers to update their configs by watching channels. 
func UpdateConfigWatchHandler(updateConfig swappable.UpdateConfig, shouldWatch ShouldWatchFunc) WatchHandlerFunc { return func(ctx context.Context, c client.Client, _ types.NamespacedName) error { channels, err := listAllChannels(ctx, c, shouldWatch) @@ -86,25 +85,14 @@ func UpdateConfigWatchHandler(updateConfig swappable.UpdateConfig, shouldWatch S // listAllChannels queries client and gets list of all channels for which shouldWatch returns true. func listAllChannels(ctx context.Context, c client.Client, shouldWatch ShouldWatchFunc) ([]v1alpha1.Channel, error) { channels := make([]v1alpha1.Channel, 0) - for { - cl := &v1alpha1.ChannelList{} - opts := &client.ListOptions{ - // Set Raw because if we need to get more than one page, then we will put the continue token - // into opts.Raw.Continue. - Raw: &metav1.ListOptions{}, - } - if err := c.List(ctx, opts, cl); err != nil { - return nil, err - } - for _, c := range cl.Items { - if c.Status.IsReady() && shouldWatch(&c) { - channels = append(channels, c) - } - } - if cl.Continue != "" { - opts.Raw.Continue = cl.Continue - } else { - return channels, nil + cl := &v1alpha1.ChannelList{} + if err := c.List(ctx, &client.ListOptions{}, cl); err != nil { + return nil, err + } + for _, c := range cl.Items { + if c.Status.IsReady() && shouldWatch(&c) { + channels = append(channels, c) } } + return channels, nil } diff --git a/pkg/channelwatcher/channel_watcher_test.go b/pkg/channelwatcher/channel_watcher_test.go index 56f9f772873..0ac1efe43bc 100644 --- a/pkg/channelwatcher/channel_watcher_test.go +++ b/pkg/channelwatcher/channel_watcher_test.go @@ -48,9 +48,9 @@ func TestUpdateConfigWatchHandler(t *testing.T) { { name: "Successfully update config", channels: []runtime.Object{ - makechannel("chan-1", "ns-1", "e.f.g.h", makeSubscribable(makeSubscriber("sub1"), makeSubscriber("sub2"))), - makechannel("chan-2", "ns-2", "i.j.k.l", makeSubscribable(makeSubscriber("sub3"), makeSubscriber("sub4"))), - makechannel("chan-3", 
"donotwatch", "i.j.k.l", makeSubscribable(makeSubscriber("sub3"), makeSubscriber("sub4"))), + makeChannel("chan-1", "ns-1", "e.f.g.h", makeSubscribable(makeSubscriber("sub1"), makeSubscriber("sub2"))), + makeChannel("chan-2", "ns-2", "i.j.k.l", makeSubscribable(makeSubscriber("sub3"), makeSubscriber("sub4"))), + makeChannel("chan-3", "donotwatch", "i.j.k.l", makeSubscribable(makeSubscriber("sub3"), makeSubscriber("sub4"))), }, expectedConfig: &multichannelfanout.Config{ ChannelConfigs: []multichannelfanout.ChannelConfig{ @@ -147,7 +147,7 @@ func getClientMocks(listError error) controllertesting.Mocks { return controllertesting.Mocks{} } -func makechannel(name string, namespace string, hostname string, subscribable *eventingduck.Subscribable) *v1alpha1.Channel { +func makeChannel(name string, namespace string, hostname string, subscribable *eventingduck.Subscribable) *v1alpha1.Channel { c := v1alpha1.Channel{ Spec: v1alpha1.ChannelSpec{ Subscribable: subscribable, diff --git a/pkg/provisioners/message_receiver.go b/pkg/provisioners/message_receiver.go index 4c9bac7f93a..175c796762b 100644 --- a/pkg/provisioners/message_receiver.go +++ b/pkg/provisioners/message_receiver.go @@ -41,15 +41,15 @@ type MessageReceiver struct { hostToChannelFunc ResolveChannelFromHostFunc } -// ReceiverOptions provides functional options to MessageReceiver function +// ReceiverOptions provides functional options to MessageReceiver function. type ReceiverOptions func(*MessageReceiver) error // ResolveChannelFromHostFunc function enables MessageReceiver to get the Channel Reference from incoming request HostHeader -// before calling receiverFunc +// before calling receiverFunc. type ResolveChannelFromHostFunc func(string) (ChannelReference, error) // ResolveChannelFromHostHeader is a ReceiverOption for NewMessageReceiver which enables the caller to overwrite the -// default behaviour defined by ParseChannel function +// default behaviour defined by ParseChannel function. 
func ResolveChannelFromHostHeader(hostToChannelFunc ResolveChannelFromHostFunc) ReceiverOptions { return func(r *MessageReceiver) error { r.hostToChannelFunc = hostToChannelFunc diff --git a/pkg/sidecar/fanout/fanout_handler_test.go b/pkg/sidecar/fanout/fanout_handler_test.go index 1163144c8e4..95e4752b1c6 100644 --- a/pkg/sidecar/fanout/fanout_handler_test.go +++ b/pkg/sidecar/fanout/fanout_handler_test.go @@ -227,8 +227,7 @@ func TestFanoutHandler_ServeHTTP(t *testing.T) { h, err := NewHandler(zap.NewNop(), Config{Subscriptions: subs}) if err != nil { - t.Errorf("NewHandler failed. Error:%s", err) - t.FailNow() + t.Fatalf("NewHandler failed. Error:%s", err) } if tc.asyncHandler { h.config.AsyncHandler = true @@ -236,8 +235,7 @@ func TestFanoutHandler_ServeHTTP(t *testing.T) { if tc.receiverFunc != nil { receiver, err := provisioners.NewMessageReceiver(tc.receiverFunc, zap.NewNop().Sugar()) if err != nil { - t.Errorf("NewMessageReceiver failed. Error:%s", err) - t.FailNow() + t.Fatalf("NewMessageReceiver failed. Error:%s", err) } h.receiver = receiver } diff --git a/pkg/sidecar/multichannelfanout/config.go b/pkg/sidecar/multichannelfanout/config.go index 1c3ca420def..77f97a2e807 100644 --- a/pkg/sidecar/multichannelfanout/config.go +++ b/pkg/sidecar/multichannelfanout/config.go @@ -35,7 +35,7 @@ type ChannelConfig struct { FanoutConfig fanout.Config `json:"fanoutConfig"` } -// NewConfigFromChannels creates a new Config from the list of channels +// NewConfigFromChannels creates a new Config from the list of channels. 
func NewConfigFromChannels(channels []v1alpha1.Channel) *Config { cc := make([]ChannelConfig, 0) for _, c := range channels { diff --git a/pkg/sidecar/multichannelfanout/config_test.go b/pkg/sidecar/multichannelfanout/config_test.go index a6d3d5ed782..0d7afe8d09f 100644 --- a/pkg/sidecar/multichannelfanout/config_test.go +++ b/pkg/sidecar/multichannelfanout/config_test.go @@ -14,16 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package multichannelfanout provides an http.Handler that takes in one request to a Knative -// Channel and fans it out to N other requests. Logically, it represents multiple Knative Channels. -// It is made up of a map, map[channel]fanout.Handler and each incoming request is inspected to -// determine which Channel it is on. This Handler delegates the HTTP handling to the fanout.Handler -// corresponding to the incoming request's Channel. -// It is often used in conjunction with a swappable.Handler. The swappable.Handler delegates all its -// requests to the multichannelfanout.Handler. When a new configuration is available, a new -// multichannelfanout.Handler is created and swapped in for all subsequent requests. The old -// multichannelfanout.Handler is discarded. 
- package multichannelfanout import ( @@ -53,7 +43,7 @@ func TestNewConfigFromChannels(t *testing.T) { }, { name: "one channel with no subscribers", channels: []v1alpha1.Channel{ - makechannel("chan-1", "ns-1", "a.b.c.d", nil), + makeChannel("chan-1", "ns-1", "a.b.c.d", nil), }, expected: &Config{ ChannelConfigs: []ChannelConfig{ @@ -67,8 +57,8 @@ func TestNewConfigFromChannels(t *testing.T) { }, { name: "multiple channels with subscribers", channels: []v1alpha1.Channel{ - makechannel("chan-1", "ns-1", "e.f.g.h", makeSubscribable(makeSubscriber("sub1"), makeSubscriber("sub2"))), - makechannel("chan-2", "ns-2", "i.j.k.l", makeSubscribable(makeSubscriber("sub3"), makeSubscriber("sub4"))), + makeChannel("chan-1", "ns-1", "e.f.g.h", makeSubscribable(makeSubscriber("sub1"), makeSubscriber("sub2"))), + makeChannel("chan-2", "ns-2", "i.j.k.l", makeSubscribable(makeSubscriber("sub3"), makeSubscriber("sub4"))), }, expected: &Config{ ChannelConfigs: []ChannelConfig{ @@ -108,7 +98,7 @@ func TestNewConfigFromChannels(t *testing.T) { } } -func makechannel(name string, namespace string, hostname string, subscribable *eventingduck.Subscribable) v1alpha1.Channel { +func makeChannel(name string, namespace string, hostname string, subscribable *eventingduck.Subscribable) v1alpha1.Channel { c := v1alpha1.Channel{ Spec: v1alpha1.ChannelSpec{ Subscribable: subscribable, From d71fecf9fa2a53eda24c61beaff532198f69216d Mon Sep 17 00:00:00 2001 From: akashrv Date: Thu, 18 Apr 2019 14:56:35 -0700 Subject: [PATCH 22/26] Added back permission that was removed by mistake --- contrib/kafka/config/kafka.yaml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/contrib/kafka/config/kafka.yaml b/contrib/kafka/config/kafka.yaml index 990c7aa20cc..31506c804de 100644 --- a/contrib/kafka/config/kafka.yaml +++ b/contrib/kafka/config/kafka.yaml @@ -67,6 +67,14 @@ rules: - kafka-channel-dispatcher verbs: - update + - apiGroups: + - "" # Core API Group. 
+ resources: + - events + verbs: + - create + - patch + - update --- apiVersion: rbac.authorization.k8s.io/v1 From 6f5d4f8fc51c789f0d1c2bad0e06e064a4035cc3 Mon Sep 17 00:00:00 2001 From: akashrv Date: Thu, 18 Apr 2019 16:45:59 -0700 Subject: [PATCH 23/26] Remove istio references --- contrib/kafka/main.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/contrib/kafka/main.go b/contrib/kafka/main.go index ed98481c20b..62df224cc98 100644 --- a/contrib/kafka/main.go +++ b/contrib/kafka/main.go @@ -8,7 +8,6 @@ import ( "github.com/knative/eventing/contrib/kafka/pkg/controller/channel" eventingv1alpha "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" "github.com/knative/eventing/pkg/provisioners" - istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3" "go.uber.org/zap" "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/controller-runtime/pkg/client/config" @@ -47,7 +46,6 @@ func main() { // Add custom types to this array to get them into the manager's scheme. schemeFuncs := []SchemeFunc{ eventingv1alpha.AddToScheme, - istiov1alpha3.AddToScheme, } for _, schemeFunc := range schemeFuncs { schemeFunc(mgr.GetScheme()) From 9f53403a0ac6c1b94de585a9ed46649bc6dc0135 Mon Sep 17 00:00:00 2001 From: akashrv Date: Thu, 18 Apr 2019 17:05:52 -0700 Subject: [PATCH 24/26] Removed one more reference of istio --- contrib/kafka/cmd/controller/main.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/contrib/kafka/cmd/controller/main.go b/contrib/kafka/cmd/controller/main.go index 375361f4af3..be99d7231ef 100644 --- a/contrib/kafka/cmd/controller/main.go +++ b/contrib/kafka/cmd/controller/main.go @@ -8,7 +8,6 @@ import ( "github.com/knative/eventing/contrib/kafka/pkg/controller/channel" eventingv1alpha "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" "github.com/knative/eventing/pkg/provisioners" - istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3" "go.uber.org/zap" "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/controller-runtime/pkg/client/config" @@ -47,7 
+46,6 @@ func _main() int { // Add custom types to this array to get them into the manager's scheme. schemeFuncs := []SchemeFunc{ eventingv1alpha.AddToScheme, - istiov1alpha3.AddToScheme, } for _, schemeFunc := range schemeFuncs { schemeFunc(mgr.GetScheme()) From a2e3d81b46d7aca8867d4fe66d122085a3abda02 Mon Sep 17 00:00:00 2001 From: akashrv Date: Fri, 19 Apr 2019 12:17:40 -0700 Subject: [PATCH 25/26] Updates based on PR comments. Ready to merge into master --- contrib/kafka/pkg/dispatcher/dispatcher.go | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/contrib/kafka/pkg/dispatcher/dispatcher.go b/contrib/kafka/pkg/dispatcher/dispatcher.go index 0cc1b51cdf6..33204e1eead 100644 --- a/contrib/kafka/pkg/dispatcher/dispatcher.go +++ b/contrib/kafka/pkg/dispatcher/dispatcher.go @@ -103,9 +103,18 @@ func (d *KafkaDispatcher) UpdateConfig(config *multichannelfanout.Config) error if diff := d.configDiff(config); diff != "" { d.logger.Info("Updating config (-old +new)", zap.String("diff", diff)) + // Create hostToChannelMap before updating kafkaConsumers. + // But update the map only after updating kafkaConsumers. + hcMap, err := createHostToChannelMap(config) + if err != nil { + return err + } + newSubs := make(map[subscription]bool) - // Subscribe to new subscriptions + // Subscribe to new subscriptions. + // TODO: Error returned by subscribe/unsubscribe must be handled. + // https://github.com/knative/eventing/issues/1072. for _, cc := range config.ChannelConfigs { channelRef := provisioners.ChannelReference{ Name: cc.Name, @@ -131,11 +140,8 @@ func (d *KafkaDispatcher) UpdateConfig(config *multichannelfanout.Config) error } } } - - hcMap, err := createHostToChannelMap(config) - if err != nil { - return err - } + // At this point all updates are done and hostToChannelMap is created successfully. + // Update the atomic value. 
d.setHostToChannelMap(hcMap) // Update the config so that it can be used for comparison during next sync From d71b410e9ac04ebb4c7413dc3aeb871dfe950f88 Mon Sep 17 00:00:00 2001 From: akashrv Date: Fri, 19 Apr 2019 12:35:11 -0700 Subject: [PATCH 26/26] Fixed a typo --- contrib/kafka/pkg/dispatcher/dispatcher.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/kafka/pkg/dispatcher/dispatcher.go b/contrib/kafka/pkg/dispatcher/dispatcher.go index 33204e1eead..718abd88584 100644 --- a/contrib/kafka/pkg/dispatcher/dispatcher.go +++ b/contrib/kafka/pkg/dispatcher/dispatcher.go @@ -85,7 +85,7 @@ type subscription struct { ReplyURI string } -// ConfigDiff diffs the new config with the existing config. If there are no differences, then the +// configDiff diffs the new config with the existing config. If there are no differences, then the // empty string is returned. If there are differences, then a non-empty string is returned // describing the differences. func (d *KafkaDispatcher) configDiff(updated *multichannelfanout.Config) string {