From 99fddec90b5249f5246eb2a1793c5e51d0ec96b7 Mon Sep 17 00:00:00 2001 From: Akash Verenkar Date: Fri, 5 Apr 2019 10:13:12 -0700 Subject: [PATCH 01/37] WIP --- cmd/fanoutsidecar/channelwatcher.go | 59 +++ cmd/fanoutsidecar/main.go | 141 ++++--- pkg/provisioners/channel_util.go | 5 +- .../filesystem/filesystem_watcher.go | 126 ------ .../filesystem/filesystem_watcher_test.go | 379 ------------------ pkg/sidecar/configmap/parse.go | 54 --- pkg/sidecar/configmap/parse_test.go | 213 ---------- pkg/sidecar/configmap/watcher/watcher.go | 49 --- pkg/sidecar/configmap/watcher/watcher_test.go | 125 ------ .../multi_channel_fanout_handler.go | 28 +- 10 files changed, 147 insertions(+), 1032 deletions(-) create mode 100644 cmd/fanoutsidecar/channelwatcher.go delete mode 100644 pkg/sidecar/configmap/filesystem/filesystem_watcher.go delete mode 100644 pkg/sidecar/configmap/filesystem/filesystem_watcher_test.go delete mode 100644 pkg/sidecar/configmap/parse.go delete mode 100644 pkg/sidecar/configmap/parse_test.go delete mode 100644 pkg/sidecar/configmap/watcher/watcher.go delete mode 100644 pkg/sidecar/configmap/watcher/watcher_test.go diff --git a/cmd/fanoutsidecar/channelwatcher.go b/cmd/fanoutsidecar/channelwatcher.go new file mode 100644 index 00000000000..d29884e43d7 --- /dev/null +++ b/cmd/fanoutsidecar/channelwatcher.go @@ -0,0 +1,59 @@ +package main + +import ( + "context" + + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" + "github.com/knative/eventing/pkg/logging" + "go.uber.org/zap" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/source" +) + +type WatchHandlerFunc func(context.Context, client.Client, types.NamespacedName) error + +type reconciler struct { + client client.Client + logger *zap.Logger + handler WatchHandlerFunc +} + +func (r *reconciler) Reconcile(req reconcile.Request) (reconcile.Result, error) { + ctx := logging.WithLogger(context.TODO(), r.logger.With(zap.Any("request", req))) + r.logger.Info("New update for channel.") + if err := r.handler(ctx, r.client, req.NamespacedName); err != nil { + r.logger.Error("WatchHandlerFunc returned error", zap.Error(err)) + return reconcile.Result{}, err + } + return reconcile.Result{}, nil +} + +func New(mgr manager.Manager, logger *zap.Logger, watchHandler WatchHandlerFunc) error { + c, err := controller.New("ChannelWatcher", mgr, controller.Options{ + Reconciler: &reconciler{ + client: mgr.GetClient(), + logger: logger, + handler: watchHandler, + }, + }) + if err != nil { + logger.Error("Unable to create controller for channelwatcher.", zap.Error(err)) + return err + } + + // Watch Channels. 
+ err = c.Watch(&source.Kind{ + Type: &v1alpha1.Channel{}, + }, &handler.EnqueueRequestForObject{}) + if err != nil { + logger.Error("Unable to watch Channels.", zap.Error(err), zap.Any("type", &v1alpha1.Channel{})) + return err + } + return nil +} diff --git a/cmd/fanoutsidecar/main.go b/cmd/fanoutsidecar/main.go index 59e8ce8892b..52a71d65b75 100644 --- a/cmd/fanoutsidecar/main.go +++ b/cmd/fanoutsidecar/main.go @@ -25,49 +25,46 @@ import ( "fmt" "log" "net/http" - "strings" "time" - "github.com/knative/eventing/pkg/sidecar/configmap/filesystem" - "github.com/knative/eventing/pkg/sidecar/configmap/watcher" + "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" + "github.com/knative/eventing/pkg/logging" + "github.com/knative/eventing/pkg/sidecar/fanout" + "github.com/knative/eventing/pkg/sidecar/multichannelfanout" "github.com/knative/eventing/pkg/sidecar/swappable" - "github.com/knative/eventing/pkg/utils" - "github.com/knative/pkg/system" "go.uber.org/zap" "go.uber.org/zap/zapcore" - "k8s.io/client-go/kubernetes" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/config" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/runtime/signals" -) - -const ( - defaultConfigMapName = "in-memory-channel-dispatcher-config-map" - // The following are the only valid values of the config_map_noticer flag. - cmnfVolume = "volume" - cmnfWatcher = "watcher" + // uncomment this line to debug in GKE from local machine + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" ) var ( readTimeout = 1 * time.Minute writeTimeout = 1 * time.Minute - port int - configMapNoticer string - configMapNamespace string - configMapName string + port int + channelProvisioners listFlags ) -func init() { - flag.IntVar(&port, "sidecar_port", -1, "The port to run the sidecar on.") - flag.StringVar(&configMapNoticer, "config_map_noticer", "", fmt.Sprintf("The system to notice changes to the ConfigMap. 
Valid values are: %s", configMapNoticerValues())) - flag.StringVar(&configMapNamespace, "config_map_namespace", system.Namespace(), "The namespace of the ConfigMap that is watched for configuration.") - flag.StringVar(&configMapName, "config_map_name", defaultConfigMapName, "The name of the ConfigMap that is watched for configuration.") +type listFlags []string + +func (l *listFlags) String() string { + return "" +} +func (l *listFlags) Set(value string) error { + *l = append(*l, value) + return nil } -func configMapNoticerValues() string { - return strings.Join([]string{cmnfVolume, cmnfWatcher}, ", ") +func init() { + flag.IntVar(&port, "sidecar_port", -1, "The port to run the sidecar on.") + flag.Var(&channelProvisioners, "channel_provisioners", "The provisioner of the channels that will be watched.") } func main() { @@ -84,14 +81,18 @@ func main() { logger.Fatal("--sidecar_port flag must be set") } + if len(channelProvisioners) < 1 { + logger.Fatal("--channel_provisioners must be specified") + } + sh, err := swappable.NewEmptyHandler(logger) if err != nil { logger.Fatal("Unable to create swappable.Handler", zap.Error(err)) } - mgr, err := setupConfigMapNoticer(logger, sh.UpdateConfig) + mgr, err := setupChannelWatcher(logger, sh.UpdateConfig) if err != nil { - logger.Fatal("Unable to create configMap noticer.", zap.Error(err)) + logger.Fatal("Unable to create channel watcher.", zap.Error(err)) } s := &http.Server{ @@ -125,57 +126,73 @@ func main() { } } -func setupConfigMapNoticer(logger *zap.Logger, configUpdated swappable.UpdateConfig) (manager.Manager, error) { - mgr, err := manager.New(config.GetConfigOrDie(), manager.Options{}) - if err != nil { - logger.Error("Error starting manager.", zap.Error(err)) - return nil, err - } - - switch configMapNoticer { - case cmnfVolume: - err = setupConfigMapVolume(logger, mgr, configUpdated) - case cmnfWatcher: - err = setupConfigMapWatcher(logger, mgr, configUpdated) - default: - err = fmt.Errorf("need to provide the --config_map_noticer flag (valid values are %s)", configMapNoticerValues()) - } +func setupChannelWatcher(logger *zap.Logger, configUpdated swappable.UpdateConfig) (manager.Manager, error) { + mgr, err := manager.New(config.GetConfigOrDie(), manager.Options{}) // TODO: Add scheme if err != nil { + logger.Error("Error creating new maanger.", zap.Error(err)) return nil, err } + v1alpha1.AddToScheme(mgr.GetScheme()) + New(mgr, logger, updateChannelConfig(configUpdated)) return mgr, nil } -func setupConfigMapVolume(logger *zap.Logger, mgr manager.Manager, configUpdated swappable.UpdateConfig) error { - cmn, err := filesystem.NewConfigMapWatcher(logger, filesystem.ConfigDir, configUpdated) - if err != nil { - logger.Error("Unable to create filesystem.ConifgMapWatcher", zap.Error(err)) - return err +func updateChannelConfig(updateConfig swappable.UpdateConfig) WatchHandlerFunc { + return func(ctx context.Context, c client.Client, chanNamespacedName types.NamespacedName) error { + channels, err := listAllChannels(ctx, c) + if err != nil { + logging.FromContext(ctx).Info("Unable to list channels", zap.Error(err)) + return err + } + config := multiChannelFanoutConfig(channels) + return updateConfig(config) } - if err = mgr.Add(cmn); err != nil { - logger.Error("Unable to add the config map watcher", zap.Error(err)) - return err - } - return nil } -func setupConfigMapWatcher(logger *zap.Logger, mgr manager.Manager, configUpdated swappable.UpdateConfig) error { - kc, err := kubernetes.NewForConfig(mgr.GetConfig()) - if err != nil { - return 
err +func listAllChannels(ctx context.Context, c client.Client) ([]v1alpha1.Channel, error) { + channels := make([]v1alpha1.Channel, 0) + cl := &v1alpha1.ChannelList{} + if err := c.List(ctx, &client.ListOptions{}, cl); err != nil { + return nil, err + } + for _, c := range cl.Items { + if c.Status.IsReady() && shouldWatch(&c) { + channels = append(channels, c) + } } + return channels, nil +} - cmw, err := watcher.NewWatcher(logger, kc, configMapNamespace, configMapName, configUpdated) - if err != nil { - return err +func shouldWatch(ch *v1alpha1.Channel) bool { + if ch.Spec.Provisioner != nil && ch.Spec.Provisioner.Namespace == "" { + for _, v := range channelProvisioners { + if v == ch.Spec.Provisioner.Name { + return true + } + } } + return false +} - if err = mgr.Add(utils.NewBlockingStart(logger, cmw)); err != nil { - logger.Error("Unable to add the config map watcher", zap.Error(err)) - return err +func multiChannelFanoutConfig(channels []v1alpha1.Channel) *multichannelfanout.Config { + cc := make([]multichannelfanout.ChannelConfig, 0) + for _, c := range channels { + channelConfig := multichannelfanout.ChannelConfig{ + Namespace: c.Namespace, + Name: c.Name, + HostName: c.Status.Address.Hostname, + } + if c.Spec.Subscribable != nil { + channelConfig.FanoutConfig = fanout.Config{ + Subscriptions: c.Spec.Subscribable.Subscribers, + } + } + cc = append(cc, channelConfig) + } + return &multichannelfanout.Config{ + ChannelConfigs: cc, } - return nil } // runnableServer is a small wrapper around http.Server so that it matches the manager.Runnable diff --git a/pkg/provisioners/channel_util.go b/pkg/provisioners/channel_util.go index a4261fe8a1e..a6a58011042 100644 --- a/pkg/provisioners/channel_util.go +++ b/pkg/provisioners/channel_util.go @@ -73,9 +73,8 @@ func CreateK8sService(ctx context.Context, client runtimeClient.Client, c *event func getK8sService(ctx context.Context, client runtimeClient.Client, c *eventingv1alpha1.Channel) (*corev1.Service, error) { list := &corev1.ServiceList{} opts := &runtimeClient.ListOptions{ - Namespace: c.Namespace, - // TODO After the full release start selecting on new set of labels by using k8sServiceLabels(c) - LabelSelector: labels.SelectorFromSet(k8sOldServiceLabels(c)), + Namespace: c.Namespace, + LabelSelector: labels.SelectorFromSet(k8sServiceLabels(c)), // Set Raw because if we need to get more than one page, then we will put the continue token // into opts.Raw.Continue. Raw: &metav1.ListOptions{}, diff --git a/pkg/sidecar/configmap/filesystem/filesystem_watcher.go b/pkg/sidecar/configmap/filesystem/filesystem_watcher.go deleted file mode 100644 index 12f5042d51e..00000000000 --- a/pkg/sidecar/configmap/filesystem/filesystem_watcher.go +++ /dev/null @@ -1,126 +0,0 @@ -/* -Copyright 2018 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package filesystem - -import ( - "errors" - - "github.com/fsnotify/fsnotify" - sidecarconfigmap "github.com/knative/eventing/pkg/sidecar/configmap" - "github.com/knative/eventing/pkg/sidecar/multichannelfanout" - "github.com/knative/eventing/pkg/sidecar/swappable" - "github.com/knative/pkg/configmap" - "go.uber.org/zap" -) - -const ( - // ConfigDir is the mount path of the configMap volume. - ConfigDir = "/etc/config/fanout_sidecar" -) - -// Monitors an attached ConfigMap volume for updated configuration and calls `configUpdated` when -// the value changes. -type ConfigMapWatcher struct { - logger *zap.Logger - // The directory to read the configMap from. - dir string - // Stop the watcher by closing this channel. - watcherStopCh chan<- bool - - // The function to call when the configuration is updated. - configUpdated swappable.UpdateConfig -} - -// NewConfigMapWatcher creates a new filesystem.ConfigMapWatcher. The caller is responsible for -// calling Start(<-chan), likely via a controller-runtime Manager. -func NewConfigMapWatcher(logger *zap.Logger, dir string, updateConfig swappable.UpdateConfig) (*ConfigMapWatcher, error) { - conf, err := readConfigMap(logger, dir) - if err != nil { - logger.Error("Unable to read configMap", zap.Error(err)) - return nil, err - } - - logger.Info("Read initial configMap", zap.Any("conf", conf)) - - err = updateConfig(conf) - if err != nil { - logger.Error("Unable to use the initial configMap: %v", zap.Error(err)) - return nil, err - } - - cmw := &ConfigMapWatcher{ - logger: logger, - dir: dir, - configUpdated: updateConfig, - } - return cmw, nil -} - -// readConfigMap attempts to read the configMap from the attached volume. -func readConfigMap(logger *zap.Logger, dir string) (*multichannelfanout.Config, error) { - cm, err := configmap.Load(dir) - if err != nil { - return nil, err - } - return sidecarconfigmap.NewFanoutConfig(logger, cm) -} - -// updateConfig reads the configMap data and calls `configUpdated` with the updated value. -func (cmw *ConfigMapWatcher) updateConfig() { - conf, err := readConfigMap(cmw.logger, cmw.dir) - if err != nil { - cmw.logger.Error("Unable to read the configMap", zap.Error(err)) - return - } - err = cmw.configUpdated(conf) - if err != nil { - cmw.logger.Error("Unable to update config", zap.Error(err)) - return - } -} - -// Start implements controller runtime's manager.Runnable. -func (cmw *ConfigMapWatcher) Start(stopCh <-chan struct{}) error { - watcher, err := fsnotify.NewWatcher() - if err != nil { - return err - } - - err = watcher.Add(cmw.dir) - if err != nil { - return err - } - - for { - select { - case _, ok := <-watcher.Events: - if !ok { - // Channel closed. - return errors.New("watcher.Events channel closed") - } - cmw.updateConfig() - case e, ok := <-watcher.Errors: - if !ok { - // Channel closed. - return errors.New("watcher.Errors channel closed") - } - cmw.logger.Error("watcher.Errors", zap.Error(e)) - case <-stopCh: - return watcher.Close() - } - } -} diff --git a/pkg/sidecar/configmap/filesystem/filesystem_watcher_test.go b/pkg/sidecar/configmap/filesystem/filesystem_watcher_test.go deleted file mode 100644 index 84a0ac83912..00000000000 --- a/pkg/sidecar/configmap/filesystem/filesystem_watcher_test.go +++ /dev/null @@ -1,379 +0,0 @@ -/* -Copyright 2018 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package filesystem - -import ( - "errors" - "fmt" - "io/ioutil" - "os" - "strings" - "sync" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - eventingduck "github.com/knative/eventing/pkg/apis/duck/v1alpha1" - "github.com/knative/eventing/pkg/sidecar/configmap" - "github.com/knative/eventing/pkg/sidecar/fanout" - "github.com/knative/eventing/pkg/sidecar/multichannelfanout" - "github.com/knative/eventing/pkg/utils" - "go.uber.org/zap" - yaml "gopkg.in/yaml.v2" -) - -func TestReadConfigMap(t *testing.T) { - testCases := []struct { - name string - createDir bool - config string - expected *multichannelfanout.Config - expectedErr bool - }{ - { - name: "dir does not exist", - createDir: false, - }, - { - name: "no data", - createDir: true, - expectedErr: true, - }, - { - name: "invalid YAML", - createDir: true, - config: ` - key: - - value - - different indent level - `, - expectedErr: true, - }, - { - name: "valid YAML -- invalid JSON", - config: "{ nil: Key }", - createDir: true, - expectedErr: true, - }, - { - name: "unknown field", - config: "{ channelConfigs: [ { not: a-defined-field } ] }", - createDir: true, - expectedErr: true, - }, - { - name: "valid", - createDir: true, - config: ` - channelConfigs: - - namespace: default - name: c1 - fanoutConfig: - subscriptions: - - subscriberURI: event-changer.default.svc.` + utils.GetClusterDomainName() + ` - replyURI: message-dumper-bar.default.svc.` + utils.GetClusterDomainName() + ` - - subscriberURI: message-dumper-foo.default.svc.` + utils.GetClusterDomainName() + ` - - replyURI: message-dumper-bar.default.svc.` + utils.GetClusterDomainName() + ` - - namespace: default - name: c2 - fanoutConfig: - subscriptions: - - replyURI: message-dumper-foo.default.svc.` + utils.GetClusterDomainName() + ` - - namespace: other - name: c3 - fanoutConfig: - subscriptions: - - replyURI: message-dumper-foo.default.svc.` + utils.GetClusterDomainName(), - expected: &multichannelfanout.Config{ - ChannelConfigs: []multichannelfanout.ChannelConfig{ - { - Namespace: "default", - Name: "c1", - FanoutConfig: fanout.Config{ - Subscriptions: []eventingduck.ChannelSubscriberSpec{ - { - SubscriberURI: "event-changer.default.svc." + utils.GetClusterDomainName(), - ReplyURI: "message-dumper-bar.default.svc." + utils.GetClusterDomainName(), - }, - { - SubscriberURI: "message-dumper-foo.default.svc." + utils.GetClusterDomainName(), - }, - { - ReplyURI: "message-dumper-bar.default.svc." + utils.GetClusterDomainName(), - }, - }, - }, - }, - { - Namespace: "default", - Name: "c2", - FanoutConfig: fanout.Config{ - Subscriptions: []eventingduck.ChannelSubscriberSpec{ - { - ReplyURI: "message-dumper-foo.default.svc." + utils.GetClusterDomainName(), - }, - }, - }, - }, - { - Namespace: "other", - Name: "c3", - FanoutConfig: fanout.Config{ - Subscriptions: []eventingduck.ChannelSubscriberSpec{ - { - ReplyURI: "message-dumper-foo.default.svc." 
+ utils.GetClusterDomainName(), - }, - }, - }, - }, - }, - }, - }, - } - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - var dir string - if tc.createDir { - var cleanup func() - dir, cleanup = createTempDir(t) - defer cleanup() - } else { - dir = "/tmp/doesNotExist" - } - writeConfigString(t, dir, tc.config) - c, e := readConfigMap(zap.NewNop(), dir) - if tc.expectedErr { - if e == nil { - t.Errorf("Expected an error, actual nil") - } - return - } - if !cmp.Equal(c, tc.expected) { - t.Errorf("Unexpected config. Expected '%v'. Actual '%v'.", tc.expected, c) - } - }) - } -} - -func TestWatch(t *testing.T) { - testCases := map[string]struct { - initialConfigErr error - initialConfig *multichannelfanout.Config - updateConfigErr error - updateConfig *multichannelfanout.Config - }{ - "error applying initial config": { - initialConfig: &multichannelfanout.Config{}, - initialConfigErr: errors.New("test-induced error"), - }, - "read initial config": { - initialConfig: &multichannelfanout.Config{ - ChannelConfigs: []multichannelfanout.ChannelConfig{ - { - Namespace: "default", - Name: "c1", - FanoutConfig: fanout.Config{ - Subscriptions: []eventingduck.ChannelSubscriberSpec{ - { - ReplyURI: "foo.bar", - }, - }, - }, - }, - }, - }, - }, - "error apply updated config": { - initialConfig: &multichannelfanout.Config{ - ChannelConfigs: []multichannelfanout.ChannelConfig{ - { - Namespace: "default", - Name: "c1", - FanoutConfig: fanout.Config{ - Subscriptions: []eventingduck.ChannelSubscriberSpec{ - { - ReplyURI: "foo.bar", - }, - }, - }, - }, - }, - }, - updateConfigErr: errors.New("test-induced error"), - }, - "update config": { - initialConfig: &multichannelfanout.Config{ - ChannelConfigs: []multichannelfanout.ChannelConfig{ - { - Namespace: "default", - Name: "c1", - FanoutConfig: fanout.Config{ - Subscriptions: []eventingduck.ChannelSubscriberSpec{ - { - ReplyURI: "foo.bar", - }, - }, - }, - }, - }, - }, - updateConfig: &multichannelfanout.Config{ - ChannelConfigs: []multichannelfanout.ChannelConfig{ - { - Namespace: "default", - Name: "new-channel", - FanoutConfig: fanout.Config{ - Subscriptions: []eventingduck.ChannelSubscriberSpec{ - { - SubscriberURI: "baz.qux", - }, - }, - }, - }, - }, - }, - }, - } - for n, tc := range testCases { - t.Run(n, func(t *testing.T) { - dir, cleanup := createTempDir(t) - defer cleanup() - writeConfig(t, dir, tc.initialConfig) - - cuc := &configUpdatedChecker{ - updateConfigErr: tc.initialConfigErr, - } - cmw, err := NewConfigMapWatcher(zap.NewNop(), dir, cuc.updateConfig) - if err != nil { - if tc.initialConfigErr != err { - t.Errorf("Unexpected error making ConfigMapWatcher. Expected: '%v'. Actual '%v'", tc.initialConfigErr, err) - } - return - } - ac := cuc.getConfig() - if !cmp.Equal(tc.initialConfig, ac) { - t.Errorf("Unexpected initial config. Expected '%v'. Actual '%v'", tc.initialConfig, ac) - } - - stopCh := make(chan struct{}) - go func() { - _ = cmw.Start(stopCh) - }() - defer func() { - close(stopCh) - }() - // Sadly, the test is flaky unless we sleep here, waiting for the file system - // watcher to truly start. - time.Sleep(100 * time.Millisecond) - - if tc.updateConfigErr != nil { - cuc.updateConfigErr = tc.updateConfigErr - } - - expected := tc.initialConfig - if tc.updateConfig != nil { - expected = tc.updateConfig - } - - cuc.updateCalled = make(chan struct{}, 1) - writeConfig(t, dir, expected) - // The watcher is running in another goroutine, give it some time to notice the - // change. 
- select { - case <-cuc.updateCalled: - break - case <-time.After(5 * time.Second): - t.Errorf("Time out waiting for watcher to notice change.") - } - - ac = cuc.getConfig() - if !cmp.Equal(ac, expected) { - t.Errorf("Unexpected update config. Expected '%v'. Actual '%v'", expected, ac) - } - }) - } -} - -type configUpdatedChecker struct { - configLock sync.Mutex - config *multichannelfanout.Config - updateCalled chan struct{} - updateConfigErr error -} - -func (cuc *configUpdatedChecker) updateConfig(config *multichannelfanout.Config) error { - cuc.configLock.Lock() - defer cuc.configLock.Unlock() - cuc.config = config - if cuc.updateCalled != nil { - cuc.updateCalled <- struct{}{} - } - return cuc.updateConfigErr -} - -func (cuc *configUpdatedChecker) getConfig() *multichannelfanout.Config { - cuc.configLock.Lock() - defer cuc.configLock.Unlock() - return cuc.config -} - -func createTempDir(t *testing.T) (string, func()) { - dir, err := ioutil.TempDir("", "configMapHandlerTest") - if err != nil { - t.Errorf("Unable to make temp directory: %v", err) - } - return dir, func() { - _ = os.RemoveAll(dir) - } -} - -func writeConfig(t *testing.T, dir string, config *multichannelfanout.Config) { - if config != nil { - yb, err := yaml.Marshal(config) - if err != nil { - t.Errorf("Unable to marshal the config") - } - writeConfigString(t, dir, string(yb)) - } -} - -func writeConfigString(t *testing.T, dir, config string) { - if config != "" { - // Golang editors tend to replace leading spaces with tabs. YAML is left whitespace - // sensitive, so let's replace the tabs with spaces. - leftSpaceConfig := strings.Replace(config, "\t", " ", -1) - err := atomicWriteFile(t, fmt.Sprintf("%s/%s", dir, configmap.MultiChannelFanoutConfigKey), []byte(leftSpaceConfig), 0700) - if err != nil { - t.Errorf("Problem writing the config file: %v", err) - } - } -} - -func atomicWriteFile(t *testing.T, file string, bytes []byte, perm os.FileMode) error { - // In order to more closely replicate how K8s writes ConfigMaps to the file system, we will - // atomically swap out the file by writing it to a temp directory, then renaming it into the - // directory we are watching. - tempDir, cleanup := createTempDir(t) - defer cleanup() - - tempFile := fmt.Sprintf("%s/%s", tempDir, "temp") - err := ioutil.WriteFile(tempFile, bytes, perm) - if err != nil { - return err - } - return os.Rename(tempFile, file) -} diff --git a/pkg/sidecar/configmap/parse.go b/pkg/sidecar/configmap/parse.go deleted file mode 100644 index ba6da64f12c..00000000000 --- a/pkg/sidecar/configmap/parse.go +++ /dev/null @@ -1,54 +0,0 @@ -/* -Copyright 2018 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package configmap - -import ( - "encoding/json" - "fmt" - - "github.com/knative/eventing/pkg/sidecar/multichannelfanout" - "go.uber.org/zap" -) - -const ( - // MultiChannelFanoutConfigKey is the key in the ConfigMap that contains all the configuration - // data. 
- MultiChannelFanoutConfigKey = "multiChannelFanoutConfig" -) - -// NewFanoutConfig attempts to parse the config map's data into a multichannelfanout.Config. -// orig == NewFanoutConfig(SerializeConfig(orig)) -func NewFanoutConfig(logger *zap.Logger, data map[string]string) (*multichannelfanout.Config, error) { - str, present := data[MultiChannelFanoutConfigKey] - if !present { - logger.Error("Expected key not found", zap.String("key", MultiChannelFanoutConfigKey)) - return nil, fmt.Errorf("expected key not found: %v", MultiChannelFanoutConfigKey) - } - return multichannelfanout.Parse(logger, str) -} - -// SerializeConfig takes in a multichannelfanout.Config and generates the ConfigMap equivalent. -// orig == NewFanoutConfig(SerializeConfig(orig)) -func SerializeConfig(config multichannelfanout.Config) (map[string]string, error) { - jb, err := json.Marshal(config) - if err != nil { - return nil, err - } - return map[string]string{ - MultiChannelFanoutConfigKey: string(jb), - }, nil -} diff --git a/pkg/sidecar/configmap/parse_test.go b/pkg/sidecar/configmap/parse_test.go deleted file mode 100644 index cee271ce090..00000000000 --- a/pkg/sidecar/configmap/parse_test.go +++ /dev/null @@ -1,213 +0,0 @@ -/* -Copyright 2018 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package configmap - -import ( - "strings" - "testing" - - "github.com/google/go-cmp/cmp" - eventingduck "github.com/knative/eventing/pkg/apis/duck/v1alpha1" - "github.com/knative/eventing/pkg/sidecar/fanout" - "github.com/knative/eventing/pkg/sidecar/multichannelfanout" - "github.com/knative/eventing/pkg/utils" - "go.uber.org/zap" -) - -func TestNewFanoutConfig(t *testing.T) { - testCases := []struct { - name string - config string - expected *multichannelfanout.Config - expectedErr bool - }{ - { - name: "no data", - expectedErr: true, - }, - { - name: "invalid YAML", - config: ` - key: - - value - - different indent level - `, - expectedErr: true, - }, - { - name: "valid YAML -- invalid JSON", - config: "{ nil: Key }", - expectedErr: true, - }, - { - name: "unknown field", - config: "{ channelConfigs: [ { not: a-defined-field } ] }", - expectedErr: true, - }, - { - name: "valid", - config: ` - channelConfigs: - - namespace: default - name: c1 - fanoutConfig: - subscriptions: - - subscriberURI: event-changer.default.svc.` + utils.GetClusterDomainName() + ` - replyURI: message-dumper-bar.default.svc.` + utils.GetClusterDomainName() + ` - - subscriberURI: message-dumper-foo.default.svc.` + utils.GetClusterDomainName() + ` - - replyURI: message-dumper-bar.default.svc.` + utils.GetClusterDomainName() + ` - - namespace: default - name: c2 - fanoutConfig: - subscriptions: - - replyURI: message-dumper-foo.default.svc.` + utils.GetClusterDomainName() + ` - - namespace: other - name: c3 - fanoutConfig: - subscriptions: - - replyURI: message-dumper-foo.default.svc.` + utils.GetClusterDomainName(), - expected: &multichannelfanout.Config{ - ChannelConfigs: []multichannelfanout.ChannelConfig{ - { - Namespace: "default", - Name: "c1", - FanoutConfig: fanout.Config{ - Subscriptions: []eventingduck.ChannelSubscriberSpec{ - { - SubscriberURI: "event-changer.default.svc." + utils.GetClusterDomainName(), - ReplyURI: "message-dumper-bar.default.svc." + utils.GetClusterDomainName(), - }, - { - SubscriberURI: "message-dumper-foo.default.svc." + utils.GetClusterDomainName(), - }, - { - ReplyURI: "message-dumper-bar.default.svc." + utils.GetClusterDomainName(), - }, - }, - }, - }, - { - Namespace: "default", - Name: "c2", - FanoutConfig: fanout.Config{ - Subscriptions: []eventingduck.ChannelSubscriberSpec{ - { - ReplyURI: "message-dumper-foo.default.svc." + utils.GetClusterDomainName(), - }, - }, - }, - }, - { - Namespace: "other", - Name: "c3", - FanoutConfig: fanout.Config{ - Subscriptions: []eventingduck.ChannelSubscriberSpec{ - { - ReplyURI: "message-dumper-foo.default.svc." + utils.GetClusterDomainName(), - }, - }, - }, - }, - }, - }, - }, - } - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - data := formatData(tc.config) - c, e := NewFanoutConfig(zap.NewNop(), data) - if tc.expectedErr { - if e == nil { - t.Errorf("Expected an error, actual nil") - } - return - } - if !cmp.Equal(c, tc.expected) { - t.Errorf("Unexpected config. Expected '%v'. 
Actual '%v'.", tc.expected, c) - } - }) - } -} - -func TestSerializeConfig(t *testing.T) { - testCases := map[string]struct { - config *multichannelfanout.Config - }{ - "empty config": { - config: &multichannelfanout.Config{}, - }, - "full config": { - config: &multichannelfanout.Config{ - ChannelConfigs: []multichannelfanout.ChannelConfig{ - { - Namespace: "default", - Name: "c1", - FanoutConfig: fanout.Config{ - Subscriptions: []eventingduck.ChannelSubscriberSpec{ - { - SubscriberURI: "foo.example.com", - ReplyURI: "bar.example.com", - }, - { - ReplyURI: "qux.example.com", - }, - { - SubscriberURI: "baz.example.com", - }, - {}, - }, - }, - }, - { - Namespace: "other", - Name: "no-subs", - FanoutConfig: fanout.Config{ - Subscriptions: []eventingduck.ChannelSubscriberSpec{}, - }, - }, - }, - }, - }, - } - - for n, tc := range testCases { - t.Run(n, func(t *testing.T) { - s, err := SerializeConfig(*tc.config) - if err != nil { - t.Errorf("Unexpected error serializing config: %v", err) - } - rt, err := NewFanoutConfig(zap.NewNop(), s) - if err != nil { - t.Errorf("Unexpected error deserializing: %v", err) - } - if diff := cmp.Diff(tc.config, rt); diff != "" { - t.Errorf("Unexpected error roundtripping the config (-want, +got): %v", diff) - } - }) - } -} - -func formatData(config string) map[string]string { - data := make(map[string]string) - if config != "" { - // Golang editors tend to replace leading spaces with tabs. YAML is left whitespace - // sensitive and disallows tabs, so let's replace the tabs with four spaces. - leftSpaceConfig := strings.Replace(config, "\t", " ", -1) - data[MultiChannelFanoutConfigKey] = leftSpaceConfig - } - return data -} diff --git a/pkg/sidecar/configmap/watcher/watcher.go b/pkg/sidecar/configmap/watcher/watcher.go deleted file mode 100644 index 01dc5d7af9a..00000000000 --- a/pkg/sidecar/configmap/watcher/watcher.go +++ /dev/null @@ -1,49 +0,0 @@ -/* -Copyright 2018 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package watcher - -import ( - sidecarconfigmap "github.com/knative/eventing/pkg/sidecar/configmap" - "github.com/knative/eventing/pkg/sidecar/swappable" - "github.com/knative/pkg/configmap" - "go.uber.org/zap" - corev1 "k8s.io/api/core/v1" - "k8s.io/client-go/kubernetes" - "sigs.k8s.io/controller-runtime/pkg/manager" -) - -// NewWatcher creates a new InformedWatcher that watches the specified ConfigMap and on any change -// that results in a valid multichannelfanout.Config calls configUpdated. 
-func NewWatcher(logger *zap.Logger, kc kubernetes.Interface, cmNamespace, cmName string, configUpdated swappable.UpdateConfig) (manager.Runnable, error) { - iw := configmap.NewInformedWatcher(kc, cmNamespace) - iw.Watch(cmName, func(cm *corev1.ConfigMap) { - config, err := sidecarconfigmap.NewFanoutConfig(logger, cm.Data) - if err != nil { - logger.Error("Could not parse ConfigMap", zap.Error(err), - zap.Any("configMap.Data", cm.Data)) - return - } - - err = configUpdated(config) - if err != nil { - logger.Error("Unable to update config", zap.Error(err)) - return - } - }) - - return iw, nil -} diff --git a/pkg/sidecar/configmap/watcher/watcher_test.go b/pkg/sidecar/configmap/watcher/watcher_test.go deleted file mode 100644 index 6164c38cd63..00000000000 --- a/pkg/sidecar/configmap/watcher/watcher_test.go +++ /dev/null @@ -1,125 +0,0 @@ -/* -Copyright 2018 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package watcher - -import ( - "errors" - "testing" - - "github.com/google/go-cmp/cmp" - eventingduck "github.com/knative/eventing/pkg/apis/duck/v1alpha1" - sidecarconfigmap "github.com/knative/eventing/pkg/sidecar/configmap" - "github.com/knative/eventing/pkg/sidecar/fanout" - "github.com/knative/eventing/pkg/sidecar/multichannelfanout" - "github.com/knative/pkg/configmap" - "go.uber.org/zap" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - namespace = "test-namespace" - name = "test-name" -) - -func TestReconcile(t *testing.T) { - testCases := map[string]struct { - config map[string]string - updateConfigErr error - expectedConfig *multichannelfanout.Config - }{ - "missing key": { - config: map[string]string{}, - expectedConfig: nil, - }, - "cannot parse cm": { - config: map[string]string{ - sidecarconfigmap.MultiChannelFanoutConfigKey: "invalid config", - }, - expectedConfig: nil, - }, - "configUpdated fails": { - config: map[string]string{ - sidecarconfigmap.MultiChannelFanoutConfigKey: "", - }, - updateConfigErr: errors.New("test-error"), - expectedConfig: &multichannelfanout.Config{}, - }, - "success": { - config: map[string]string{ - sidecarconfigmap.MultiChannelFanoutConfigKey: ` - channelConfigs: - - name: foo - namespace: bar - fanoutConfig: - subscriptions: - - subscriberURI: subscriber - replyURI: reply`, - }, - expectedConfig: &multichannelfanout.Config{ - ChannelConfigs: []multichannelfanout.ChannelConfig{ - { - Name: "foo", - Namespace: "bar", - FanoutConfig: fanout.Config{ - Subscriptions: []eventingduck.ChannelSubscriberSpec{ - { - SubscriberURI: "subscriber", - ReplyURI: "reply", - }, - }, - }, - }, - }, - }, - }, - } - for n, tc := range testCases { - t.Run(n, func(t *testing.T) { - cuc := &configUpdatedChecker{ - updateConfigErr: tc.updateConfigErr, - } - - r, err := NewWatcher(zap.NewNop(), nil, namespace, name, cuc.updateConfig) - if err != nil { - t.Errorf("Error creating watcher: %v", err) - } - iw := r.(*configmap.InformedWatcher) - iw.OnChange(&corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: namespace, - 
Name: name, - }, - Data: tc.config, - }) - - if diff := cmp.Diff(tc.expectedConfig, cuc.config); diff != "" { - t.Errorf("Unexpected config (-want +got): %v", diff) - } - }) - } -} - -type configUpdatedChecker struct { - config *multichannelfanout.Config - updateConfigErr error -} - -func (cuc *configUpdatedChecker) updateConfig(config *multichannelfanout.Config) error { - cuc.config = config - return cuc.updateConfigErr -} diff --git a/pkg/sidecar/multichannelfanout/multi_channel_fanout_handler.go b/pkg/sidecar/multichannelfanout/multi_channel_fanout_handler.go index 8872b7026ac..a2f24cbc6d8 100644 --- a/pkg/sidecar/multichannelfanout/multi_channel_fanout_handler.go +++ b/pkg/sidecar/multichannelfanout/multi_channel_fanout_handler.go @@ -30,7 +30,6 @@ import ( "net/http" "github.com/google/go-cmp/cmp" - "github.com/knative/eventing/pkg/provisioners" "github.com/knative/eventing/pkg/sidecar/fanout" "go.uber.org/zap" ) @@ -43,29 +42,21 @@ type Config struct { // ChannelConfig is the configuration for a single Channel. type ChannelConfig struct { - Namespace string `json:"namespace"` - Name string `json:"name"` + Namespace string `json:"namespace"` + Name string `json:"name"` + HostName string FanoutConfig fanout.Config `json:"fanoutConfig"` } -// MakeChannelKey creates the key used for this Channel in the Handler's handlers map. -func makeChannelKey(namespace, name string) string { - return fmt.Sprintf("%s/%s", namespace, name) -} - // makeChannelKeyFromConfig creates the channel key for a given channelConfig. It is a helper around // MakeChannelKey. func makeChannelKeyFromConfig(config ChannelConfig) string { - return makeChannelKey(config.Namespace, config.Name) + return config.HostName } // getChannelKey extracts the channel key from the given HTTP request. -func getChannelKey(r *http.Request) (string, error) { - cr, err := provisioners.ParseChannel(r.Host) - if err != nil { - return "", err - } - return makeChannelKey(cr.Namespace, cr.Name), nil +func getChannelKey(r *http.Request) string { + return r.Host } // Handler is an http.Handler that introspects the incoming request to determine what Channel it is @@ -114,12 +105,7 @@ func (h *Handler) CopyWithNewConfig(conf Config) (*Handler, error) { // ServeHTTP delegates the actual handling of the request to a fanout.Handler, based on the // request's channel key. 
func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - channelKey, err := getChannelKey(r) - if err != nil { - h.logger.Error("Unable to extract channelKey", zap.Error(err)) - w.WriteHeader(http.StatusInternalServerError) - return - } + channelKey := getChannelKey(r) fh, ok := h.handlers[channelKey] if !ok { h.logger.Error("Unable to find a handler for request", zap.String("channelKey", channelKey)) From c642ceafe38fcf775d3a65a8794b9a3598b79049 Mon Sep 17 00:00:00 2001 From: akashrv Date: Sat, 6 Apr 2019 10:37:40 -0700 Subject: [PATCH 02/37] WIP - In-memory working with E2E tests --- cmd/fanoutsidecar/main.go | 9 +- .../in-memory-channel/in-memory-channel.yaml | 27 +----- .../pkg/controller/channel/reconcile.go | 14 +-- .../channelwatcher}/channelwatcher.go | 2 +- pkg/provisioners/channel_util.go | 10 +- .../inmemory/channel/controller.go | 19 +--- .../inmemory/channel/reconcile.go | 92 ++----------------- pkg/provisioners/inmemory/controller/main.go | 3 + .../v1alpha1/broker/resources/ingress.go | 6 +- pkg/sidecar/swappable/swappable.go | 4 + test/crd.go | 5 +- 11 files changed, 37 insertions(+), 154 deletions(-) rename {cmd/fanoutsidecar => pkg/channelwatcher}/channelwatcher.go (98%) diff --git a/cmd/fanoutsidecar/main.go b/cmd/fanoutsidecar/main.go index 52a71d65b75..9787fdfaecb 100644 --- a/cmd/fanoutsidecar/main.go +++ b/cmd/fanoutsidecar/main.go @@ -28,6 +28,7 @@ import ( "time" "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" + "github.com/knative/eventing/pkg/channelwatcher" "github.com/knative/eventing/pkg/logging" "github.com/knative/eventing/pkg/sidecar/fanout" "github.com/knative/eventing/pkg/sidecar/multichannelfanout" @@ -64,7 +65,7 @@ func (l *listFlags) Set(value string) error { func init() { flag.IntVar(&port, "sidecar_port", -1, "The port to run the sidecar on.") - flag.Var(&channelProvisioners, "channel_provisioners", "The provisioner of the channels that will be watched.") + flag.Var(&channelProvisioners, "channel_provisioner", "The provisioner of the channels that will be watched.") } func main() { @@ -127,18 +128,18 @@ func main() { } func setupChannelWatcher(logger *zap.Logger, configUpdated swappable.UpdateConfig) (manager.Manager, error) { - mgr, err := manager.New(config.GetConfigOrDie(), manager.Options{}) // TODO: Add scheme + mgr, err := manager.New(config.GetConfigOrDie(), manager.Options{}) if err != nil { logger.Error("Error creating new maanger.", zap.Error(err)) return nil, err } v1alpha1.AddToScheme(mgr.GetScheme()) - New(mgr, logger, updateChannelConfig(configUpdated)) + channelwatcher.New(mgr, logger, updateChannelConfig(configUpdated)) return mgr, nil } -func updateChannelConfig(updateConfig swappable.UpdateConfig) WatchHandlerFunc { +func updateChannelConfig(updateConfig swappable.UpdateConfig) channelwatcher.WatchHandlerFunc { return func(ctx context.Context, c client.Client, chanNamespacedName types.NamespacedName) error { channels, err := listAllChannels(ctx, c) if err != nil { diff --git a/config/provisioners/in-memory-channel/in-memory-channel.yaml b/config/provisioners/in-memory-channel/in-memory-channel.yaml index e0191da4081..cec26e323e3 100644 --- a/config/provisioners/in-memory-channel/in-memory-channel.yaml +++ b/config/provisioners/in-memory-channel/in-memory-channel.yaml @@ -87,8 +87,6 @@ rules: - "" # Core API Group. 
resources: - configmaps - resourceNames: - - in-memory-channel-dispatcher-config-map verbs: - update - apiGroups: @@ -168,9 +166,10 @@ metadata: name: in-memory-channel-dispatcher rules: - apiGroups: - - "" # Core API group. + - "eventing.knative.dev" resources: - - configmaps + - "channels" + - "channels/status" verbs: - get - list @@ -206,8 +205,6 @@ spec: role: dispatcher template: metadata: - annotations: - sidecar.istio.io/inject: "true" labels: *labels spec: serviceAccountName: in-memory-channel-dispatcher @@ -216,24 +213,10 @@ spec: image: github.com/knative/eventing/cmd/fanoutsidecar args: - --sidecar_port=8080 - - --config_map_noticer=watcher - - --config_map_namespace=knative-eventing - - --config_map_name=in-memory-channel-dispatcher-config-map + - --channel_provisioner=in-memory + - --channel_provisioner=in-memory-channel env: - name: SYSTEM_NAMESPACE valueFrom: fieldRef: fieldPath: metadata.namespace - ---- - -# Create the ConfigMap, because if we don't the dispatcher will flap when it first comes online and -# this can cause the integration tests to fail. - -apiVersion: v1 -kind: ConfigMap -metadata: - name: in-memory-channel-dispatcher-config-map - namespace: knative-eventing -data: - multiChannelFanoutConfig: '{}' diff --git a/contrib/gcppubsub/pkg/controller/channel/reconcile.go b/contrib/gcppubsub/pkg/controller/channel/reconcile.go index b446f11de3c..a1836d83e8a 100644 --- a/contrib/gcppubsub/pkg/controller/channel/reconcile.go +++ b/contrib/gcppubsub/pkg/controller/channel/reconcile.go @@ -121,9 +121,6 @@ func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, err } logging.FromContext(ctx).Info("Reconciling Channel") - // Modify a copy, not the original. - c = c.DeepCopy() - ctx = logging.WithLogger(ctx, logging.FromContext(ctx).With(zap.Any("channel", c))) requeue, reconcileErr := r.reconcile(ctx, c) if reconcileErr != nil { @@ -163,9 +160,8 @@ func (r *reconciler) reconcile(ctx context.Context, c *eventingv1alpha1.Channel) // We are syncing four things: // 1. The K8s Service to talk to this Channel. - // 2. The Istio VirtualService to talk to this Channel. - // 3. The GCP PubSub Topic (one for the Channel). - // 4. The GCP PubSub Subscriptions (one for each Subscriber of the Channel). + // 2. The GCP PubSub Topic (one for the Channel). + // 3. The GCP PubSub Subscriptions (one for each Subscriber of the Channel). // First we will plan all the names out for steps 3 and 4 persist them to status.internal. Then, on a // subsequent reconcile, we manipulate all the GCP resources in steps 3 and 4. 
@@ -237,12 +233,6 @@ func (r *reconciler) reconcile(ctx context.Context, c *eventingv1alpha1.Channel) return false, err } - err = r.createVirtualService(ctx, c, svc) - if err != nil { - r.recorder.Eventf(c, v1.EventTypeWarning, virtualServiceCreateFailed, "Failed to reconcile Virtual Service for the Channel: %v", err) - return false, err - } - topic, err := r.createTopic(ctx, plannedPCS, gcpCreds) if err != nil { r.recorder.Eventf(c, v1.EventTypeWarning, topicCreateFailed, "Failed to reconcile Topic for the Channel: %v", err) diff --git a/cmd/fanoutsidecar/channelwatcher.go b/pkg/channelwatcher/channelwatcher.go similarity index 98% rename from cmd/fanoutsidecar/channelwatcher.go rename to pkg/channelwatcher/channelwatcher.go index d29884e43d7..1b9f7dcbb2a 100644 --- a/cmd/fanoutsidecar/channelwatcher.go +++ b/pkg/channelwatcher/channelwatcher.go @@ -1,4 +1,4 @@ -package main +package channelwatcher import ( "context" diff --git a/pkg/provisioners/channel_util.go b/pkg/provisioners/channel_util.go index a6a58011042..7df9356cc5e 100644 --- a/pkg/provisioners/channel_util.go +++ b/pkg/provisioners/channel_util.go @@ -248,6 +248,8 @@ func UpdateChannel(ctx context.Context, client runtimeClient.Client, u *eventing // OwnerReferences on the resource so handleObject can discover the Channel resource that 'owns' it. // As well as being garbage collected when the Channel is deleted. func newK8sService(c *eventingv1alpha1.Channel) *corev1.Service { + // TODO: Need to check if generated name truncates the channel name in case channel name is tool long + // Add annotations return &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ GenerateName: channelServiceName(c.ObjectMeta.Name), @@ -262,12 +264,8 @@ func newK8sService(c *eventingv1alpha1.Channel) *corev1.Service { }, }, Spec: corev1.ServiceSpec{ - Ports: []corev1.ServicePort{ - { - Name: PortName, - Port: PortNumber, - }, - }, + Type: "ExternalName", + ExternalName: names.ServiceHostName(channelDispatcherServiceName(c.Spec.Provisioner.Name), system.Namespace()), }, } } diff --git a/pkg/provisioners/inmemory/channel/controller.go b/pkg/provisioners/inmemory/channel/controller.go index 7ff6128759a..6edf558e411 100644 --- a/pkg/provisioners/inmemory/channel/controller.go +++ b/pkg/provisioners/inmemory/channel/controller.go @@ -19,10 +19,8 @@ package channel import ( eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3" - "github.com/knative/pkg/system" "go.uber.org/zap" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/manager" @@ -33,18 +31,6 @@ const ( // controllerAgentName is the string used by this controller to identify // itself when creating events. controllerAgentName = "in-memory-channel-controller" - - // ConfigMapName is the name of the ConfigMap in the knative-eventing namespace that contains - // the subscription information for all in-memory Channels. The Provisioner writes to it and the - // Dispatcher reads from it. - ConfigMapName = "in-memory-channel-dispatcher-config-map" -) - -var ( - defaultConfigMapKey = types.NamespacedName{ - Namespace: system.Namespace(), - Name: ConfigMapName, - } ) // ProvideController returns a Controller that represents the in-memory-channel Provisioner. 
@@ -52,9 +38,8 @@ func ProvideController(mgr manager.Manager, logger *zap.Logger) (controller.Cont // Setup a new controller to Reconcile Channels that belong to this Cluster Provisioner // (in-memory channels). r := &reconciler{ - configMapKey: defaultConfigMapKey, - recorder: mgr.GetRecorder(controllerAgentName), - logger: logger, + recorder: mgr.GetRecorder(controllerAgentName), + logger: logger, } c, err := controller.New(controllerAgentName, mgr, controller.Options{ Reconciler: r, diff --git a/pkg/provisioners/inmemory/channel/reconcile.go b/pkg/provisioners/inmemory/channel/reconcile.go index 53ed753244e..5db47a21063 100644 --- a/pkg/provisioners/inmemory/channel/reconcile.go +++ b/pkg/provisioners/inmemory/channel/reconcile.go @@ -21,7 +21,6 @@ import ( "go.uber.org/zap" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/record" @@ -32,7 +31,6 @@ import ( util "github.com/knative/eventing/pkg/provisioners" ccpcontroller "github.com/knative/eventing/pkg/provisioners/inmemory/clusterchannelprovisioner" "github.com/knative/eventing/pkg/reconciler/names" - "github.com/knative/eventing/pkg/sidecar/configmap" "github.com/knative/eventing/pkg/sidecar/fanout" "github.com/knative/eventing/pkg/sidecar/multichannelfanout" ) @@ -53,8 +51,6 @@ type reconciler struct { client client.Client recorder record.EventRecorder logger *zap.Logger - - configMapKey client.ObjectKey } // Verify the struct implements reconcile.Reconciler @@ -93,9 +89,6 @@ func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, err } logger.Info("Reconciling Channel") - // Modify a copy, not the original. - c = c.DeepCopy() - err = r.reconcile(ctx, c) if err != nil { logger.Info("Error reconciling Channel", zap.Error(err)) @@ -130,16 +123,8 @@ func (r *reconciler) reconcile(ctx context.Context, c *eventingv1alpha1.Channel) // We are syncing three things: // 1. The K8s Service to talk to this Channel. - // 2. The Istio VirtualService to talk to this Channel. // 3. The configuration of all Channel subscriptions. - // We always need to sync the Channel config, so do it first. - if err := r.syncChannelConfig(ctx); err != nil { - logger.Info("Error syncing the Channel config", zap.Error(err)) - r.recorder.Eventf(c, corev1.EventTypeWarning, channelConfigSyncFailed, "Failed to sync Channel config: %v", err) - return err - } - if c.DeletionTimestamp != nil { // K8s garbage collection will delete the K8s service and VirtualService for this channel. // We use a finalizer to ensure the channel config has been synced. @@ -149,89 +134,24 @@ func (r *reconciler) reconcile(ctx context.Context, c *eventingv1alpha1.Channel) util.AddFinalizer(c, finalizerName) + // We use a single dispatcher for both in-memory and in-memory-channel provisioners. 
+ // + originalProvisionerName := c.Spec.Provisioner.Name + c.Spec.Provisioner.Name = defaultProvisionerName svc, err := util.CreateK8sService(ctx, r.client, c) if err != nil { logger.Info("Error creating the Channel's K8s Service", zap.Error(err)) r.recorder.Eventf(c, corev1.EventTypeWarning, k8sServiceCreateFailed, "Failed to reconcile Channel's K8s Service: %v", err) return err } - c.Status.SetAddress(names.ServiceHostName(svc.Name, svc.Namespace)) + c.Spec.Provisioner.Name = originalProvisionerName - if c.Spec.Provisioner.Name == defaultProvisionerName { - _, err = util.CreateVirtualService(ctx, r.client, c, svc) - if err != nil { - logger.Info("Error creating the Virtual Service for the Channel", zap.Error(err)) - r.recorder.Eventf(c, corev1.EventTypeWarning, virtualServiceCreateFailed, "Failed to reconcile Virtual Service for the Channel: %v", err) - return err - } - } else { - // We need to have a single dispatcher that is pointed at by _both_ - // ClusterChannelProvisioners. So fake the channel, by saying that its provisioner is the - // one with the single dispatcher. The faked provisioner is used only to determine the - // dispatcher Service's name. - cCopy := c.DeepCopy() - cCopy.Spec.Provisioner.Name = defaultProvisionerName - _, err = util.CreateVirtualService(ctx, r.client, cCopy, svc) - if err != nil { - logger.Info("Error creating the Virtual Service for the Channel", zap.Error(err)) - r.recorder.Eventf(c, corev1.EventTypeWarning, virtualServiceCreateFailed, "Failed to reconcile Virtual Service for the Channel: %v", err) - return err - } - } + c.Status.SetAddress(names.ServiceHostName(svc.Name, svc.Namespace)) c.Status.MarkProvisioned() return nil } -func (r *reconciler) syncChannelConfig(ctx context.Context) error { - channels, err := r.listAllChannels(ctx) - if err != nil { - r.logger.Info("Unable to list channels", zap.Error(err)) - return err - } - config := multiChannelFanoutConfig(channels) - return r.writeConfigMap(ctx, config) -} - -func (r *reconciler) writeConfigMap(ctx context.Context, config *multichannelfanout.Config) error { - logger := r.logger.With(zap.Any("configMap", r.configMapKey)) - - updated, err := configmap.SerializeConfig(*config) - if err != nil { - r.logger.Error("Unable to serialize config", zap.Error(err), zap.Any("config", config)) - return err - } - - cm := &corev1.ConfigMap{} - err = r.client.Get(ctx, r.configMapKey, cm) - if errors.IsNotFound(err) { - cm = r.createNewConfigMap(updated) - err = r.client.Create(ctx, cm) - } - if err != nil { - logger.Info("Unable to get/create ConfigMap", zap.Error(err)) - return err - } - - if equality.Semantic.DeepEqual(cm.Data, updated) { - // Nothing to update. 
- return nil - } - - cm.Data = updated - return r.client.Update(ctx, cm) -} - -func (r *reconciler) createNewConfigMap(data map[string]string) *corev1.ConfigMap { - return &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: r.configMapKey.Namespace, - Name: r.configMapKey.Name, - }, - Data: data, - } -} - func multiChannelFanoutConfig(channels []eventingv1alpha1.Channel) *multichannelfanout.Config { cc := make([]multichannelfanout.ChannelConfig, 0) for _, c := range channels { diff --git a/pkg/provisioners/inmemory/controller/main.go b/pkg/provisioners/inmemory/controller/main.go index d8da2d062b4..99ee64f885e 100644 --- a/pkg/provisioners/inmemory/controller/main.go +++ b/pkg/provisioners/inmemory/controller/main.go @@ -29,6 +29,9 @@ import ( "go.uber.org/zap" "sigs.k8s.io/controller-runtime/pkg/client/config" "sigs.k8s.io/controller-runtime/pkg/manager" + + // uncomment this line to debug in GKE from local machine + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" ) func main() { diff --git a/pkg/reconciler/v1alpha1/broker/resources/ingress.go b/pkg/reconciler/v1alpha1/broker/resources/ingress.go index f83a991a7b1..f4eb40cd85c 100644 --- a/pkg/reconciler/v1alpha1/broker/resources/ingress.go +++ b/pkg/reconciler/v1alpha1/broker/resources/ingress.go @@ -58,9 +58,9 @@ func MakeIngress(args *IngressArgs) *appsv1.Deployment { Template: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Labels: ingressLabels(args.Broker), - Annotations: map[string]string{ - "sidecar.istio.io/inject": "true", - }, + // Annotations: map[string]string{ + // "sidecar.istio.io/inject": "true", + // }, }, Spec: corev1.PodSpec{ ServiceAccountName: args.ServiceAccountName, diff --git a/pkg/sidecar/swappable/swappable.go b/pkg/sidecar/swappable/swappable.go index 70de3edab2c..3cff72630df 100644 --- a/pkg/sidecar/swappable/swappable.go +++ b/pkg/sidecar/swappable/swappable.go @@ -24,6 +24,7 @@ package swappable import ( "errors" + "fmt" "net/http" "sync" "sync/atomic" @@ -102,6 +103,9 @@ func (h *Handler) UpdateConfig(config *multichannelfanout.Config) error { // ServeHTTP delegates all HTTP requests to the current multichannelfanout.Handler. func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + // TODO: delete this debugging code + fmt.Sprintf("Request: %+v", r) + // Hand work off to the current multi channel fanout handler. 
h.logger.Debug("ServeHTTP request received") h.getMultiChannelFanoutHandler().ServeHTTP(w, r) diff --git a/test/crd.go b/test/crd.go index e4a000b2c59..139b33de079 100644 --- a/test/crd.go +++ b/test/crd.go @@ -164,9 +164,8 @@ func EventSenderPod(name string, namespace string, sink string, event CloudEvent return &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Annotations: map[string]string{"sidecar.istio.io/inject": "true"}, + Name: name, + Namespace: namespace, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{{ From 2a4faae5e4969868b1d2f63b3dd87992b816c784 Mon Sep 17 00:00:00 2001 From: Akash Verenkar Date: Tue, 9 Apr 2019 10:33:56 -0700 Subject: [PATCH 03/37] WIP - remove istio dependency from in-memroy channel --- cmd/broker/ingress/main.go | 17 + .../in-memory-channel/in-memory-channel.yaml | 19 +- .../pkg/controller/channel/reconcile.go | 14 +- pkg/provisioners/channel_util.go | 39 +- .../inmemory/channel/controller.go | 10 - .../inmemory/channel/reconcile.go | 44 +- .../inmemory/channel/reconcile_test.go | 375 +---------------- .../clusterchannelprovisioner/reconcile.go | 12 +- .../reconcile_test.go | 65 ++- pkg/provisioners/provisioner_util.go | 24 +- .../filesystem/filesystem_watcher.go | 126 ++++++ .../filesystem/filesystem_watcher_test.go | 379 ++++++++++++++++++ pkg/sidecar/configmap/parse.go | 54 +++ pkg/sidecar/configmap/parse_test.go | 213 ++++++++++ pkg/sidecar/configmap/watcher/watcher.go | 49 +++ pkg/sidecar/configmap/watcher/watcher_test.go | 125 ++++++ .../multi_channel_fanout_handler_test.go | 49 +-- pkg/sidecar/swappable/swappable_test.go | 24 +- 18 files changed, 1127 insertions(+), 511 deletions(-) create mode 100644 pkg/sidecar/configmap/filesystem/filesystem_watcher.go create mode 100644 pkg/sidecar/configmap/filesystem/filesystem_watcher_test.go create mode 100644 pkg/sidecar/configmap/parse.go create mode 100644 pkg/sidecar/configmap/parse_test.go create mode 100644 pkg/sidecar/configmap/watcher/watcher.go create mode 100644 pkg/sidecar/configmap/watcher/watcher_test.go diff --git a/cmd/broker/ingress/main.go b/cmd/broker/ingress/main.go index ea0094fba73..efafda14aae 100644 --- a/cmd/broker/ingress/main.go +++ b/cmd/broker/ingress/main.go @@ -17,10 +17,12 @@ package main import ( + "bytes" "context" "errors" "flag" "fmt" + "io/ioutil" "log" "net/http" "net/url" @@ -221,6 +223,20 @@ func (h *handler) serveHTTP(ctx context.Context, event cloudevents.Event, resp * } func (h *handler) sendEvent(ctx context.Context, tctx cehttp.TransportContext, event cloudevents.Event) error { + + //url := "http://external-service.knative-eventing.svc.cluster.local" + resp, err1 := http.Post(h.channelURI.String(), "application/json", bytes.NewBuffer([]byte{})) + if err1 != nil { + log.Println("Error:", err1) + } + body, err1 := ioutil.ReadAll(resp.Body) + if err1 != nil { + log.Fatalln(err1) + } + log.Println(fmt.Sprintf("Reponse: %+v", resp)) + log.Println(fmt.Sprintf("ReponseBody from server: %v", string(body))) + + fmt.Println("ChannelURI: ", h.channelURI) sendingCTX := broker.SendingContext(ctx, tctx, h.channelURI) startTS := time.Now() @@ -232,6 +248,7 @@ func (h *handler) sendEvent(ctx context.Context, tctx cehttp.TransportContext, e _, err := h.ceClient.Send(sendingCTX, event) if err != nil { sendingCTX, _ = tag.New(sendingCTX, tag.Insert(TagResult, "error")) + fmt.Println("Error: ", err) } else { sendingCTX, _ = tag.New(sendingCTX, tag.Insert(TagResult, "ok")) } diff --git 
a/config/provisioners/in-memory-channel/in-memory-channel.yaml b/config/provisioners/in-memory-channel/in-memory-channel.yaml index cec26e323e3..3f466ff5e41 100644 --- a/config/provisioners/in-memory-channel/in-memory-channel.yaml +++ b/config/provisioners/in-memory-channel/in-memory-channel.yaml @@ -62,7 +62,6 @@ rules: - apiGroups: - "" # Core API group. resources: - - configmaps - services verbs: - get @@ -83,22 +82,6 @@ rules: - services verbs: - update - - apiGroups: - - "" # Core API Group. - resources: - - configmaps - verbs: - - update - - apiGroups: - - networking.istio.io - resources: - - virtualservices - verbs: - - get - - list - - watch - - create - - update - apiGroups: - "" # Core API Group. resources: @@ -205,6 +188,8 @@ spec: role: dispatcher template: metadata: + annotations: + sidecar.istio.io/inject: "true" labels: *labels spec: serviceAccountName: in-memory-channel-dispatcher diff --git a/contrib/gcppubsub/pkg/controller/channel/reconcile.go b/contrib/gcppubsub/pkg/controller/channel/reconcile.go index a1836d83e8a..b446f11de3c 100644 --- a/contrib/gcppubsub/pkg/controller/channel/reconcile.go +++ b/contrib/gcppubsub/pkg/controller/channel/reconcile.go @@ -121,6 +121,9 @@ func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, err } logging.FromContext(ctx).Info("Reconciling Channel") + // Modify a copy, not the original. + c = c.DeepCopy() + ctx = logging.WithLogger(ctx, logging.FromContext(ctx).With(zap.Any("channel", c))) requeue, reconcileErr := r.reconcile(ctx, c) if reconcileErr != nil { @@ -160,8 +163,9 @@ func (r *reconciler) reconcile(ctx context.Context, c *eventingv1alpha1.Channel) // We are syncing four things: // 1. The K8s Service to talk to this Channel. - // 2. The GCP PubSub Topic (one for the Channel). - // 3. The GCP PubSub Subscriptions (one for each Subscriber of the Channel). + // 2. The Istio VirtualService to talk to this Channel. + // 3. The GCP PubSub Topic (one for the Channel). + // 4. The GCP PubSub Subscriptions (one for each Subscriber of the Channel). // First we will plan all the names out for steps 3 and 4 persist them to status.internal. Then, on a // subsequent reconcile, we manipulate all the GCP resources in steps 3 and 4. 
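As a side note on the reconcile flow described in the comment above, the plan-then-act shape (derive stable names, persist them to status.internal, act on the persisted plan in a later pass) can be sketched in isolation. Everything below — the types, helper names, and naming scheme — is illustrative only and is not the provisioner's actual status.internal schema:

package main

import "fmt"

// internalStatus stands in for the planned names recorded on the Channel's status.
type internalStatus struct {
	Topic         string
	Subscriptions []string
}

// planNames derives names deterministically from the Channel identity so that a
// later reconcile reproduces the same plan.
func planNames(ns, name string, subscriberUIDs []string) internalStatus {
	st := internalStatus{Topic: fmt.Sprintf("channel-%s-%s", ns, name)}
	for _, uid := range subscriberUIDs {
		st.Subscriptions = append(st.Subscriptions, fmt.Sprintf("channel-%s-%s-%s", ns, name, uid))
	}
	return st
}

// apply acts only on what was previously planned and persisted.
func apply(st internalStatus, ensureTopic, ensureSubscription func(string) error) error {
	if err := ensureTopic(st.Topic); err != nil {
		return err
	}
	for _, s := range st.Subscriptions {
		if err := ensureSubscription(s); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	st := planNames("default", "my-channel", []string{"sub-1"})
	err := apply(st,
		func(topic string) error { fmt.Println("ensure topic:", topic); return nil },
		func(sub string) error { fmt.Println("ensure subscription:", sub); return nil })
	if err != nil {
		fmt.Println("reconcile error:", err)
	}
}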
@@ -233,6 +237,12 @@ func (r *reconciler) reconcile(ctx context.Context, c *eventingv1alpha1.Channel) return false, err } + err = r.createVirtualService(ctx, c, svc) + if err != nil { + r.recorder.Eventf(c, v1.EventTypeWarning, virtualServiceCreateFailed, "Failed to reconcile Virtual Service for the Channel: %v", err) + return false, err + } + topic, err := r.createTopic(ctx, plannedPCS, gcpCreds) if err != nil { r.recorder.Eventf(c, v1.EventTypeWarning, topicCreateFailed, "Failed to reconcile Topic for the Channel: %v", err) diff --git a/pkg/provisioners/channel_util.go b/pkg/provisioners/channel_util.go index 7df9356cc5e..d52581c8116 100644 --- a/pkg/provisioners/channel_util.go +++ b/pkg/provisioners/channel_util.go @@ -63,11 +63,28 @@ func RemoveFinalizer(o metav1.Object, finalizerName string) { o.SetFinalizers(finalizers.List()) } -func CreateK8sService(ctx context.Context, client runtimeClient.Client, c *eventingv1alpha1.Channel) (*corev1.Service, error) { +type k8sServiceOption func(*corev1.Service) error + +// ExternalService is a functional option for CreateK8sService to create a K8s service of type ExternalName +func ExternalService(c *eventingv1alpha1.Channel) k8sServiceOption { + return func(svc *corev1.Service) error { + svc.Spec = corev1.ServiceSpec{ + Type: "ExternalName", + ExternalName: names.ServiceHostName(channelDispatcherServiceName(c.Spec.Provisioner.Name), system.Namespace()), + } + return nil + } +} + +func CreateK8sService(ctx context.Context, client runtimeClient.Client, c *eventingv1alpha1.Channel, opts ...k8sServiceOption) (*corev1.Service, error) { getSvc := func() (*corev1.Service, error) { return getK8sService(ctx, client, c) } - return createK8sService(ctx, client, getSvc, newK8sService(c)) + svc, err := newK8sService(c, opts...) + if err != nil { + return nil, err + } + return createK8sService(ctx, client, getSvc, svc) } func getK8sService(ctx context.Context, client runtimeClient.Client, c *eventingv1alpha1.Channel) (*corev1.Service, error) { @@ -247,10 +264,10 @@ func UpdateChannel(ctx context.Context, client runtimeClient.Client, u *eventing // newK8sService creates a new Service for a Channel resource. It also sets the appropriate // OwnerReferences on the resource so handleObject can discover the Channel resource that 'owns' it. // As well as being garbage collected when the Channel is deleted. 
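The k8sServiceOption/ExternalService helpers introduced above follow the functional-options pattern: build a base Service, then let variadic options mutate it. A self-contained sketch of the same idea follows; the option, helper, and ExternalName target names here are illustrative, only the corev1/metav1 types are real:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

type serviceOption func(*corev1.Service) error

// asExternalName mirrors what ExternalService does: it replaces the Spec so the
// Service merely aliases another DNS name instead of selecting pods.
func asExternalName(target string) serviceOption {
	return func(svc *corev1.Service) error {
		svc.Spec = corev1.ServiceSpec{
			Type:         corev1.ServiceTypeExternalName,
			ExternalName: target,
		}
		return nil
	}
}

func newService(name string, opts ...serviceOption) (*corev1.Service, error) {
	svc := &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Spec: corev1.ServiceSpec{
			Ports: []corev1.ServicePort{{Name: "http", Port: 80}},
		},
	}
	for _, opt := range opts {
		if err := opt(svc); err != nil {
			return nil, err
		}
	}
	return svc, nil
}

func main() {
	// Illustrative target; the real code computes it from the provisioner name
	// and the system namespace.
	svc, err := newService("c1-channel",
		asExternalName("in-memory-channel-dispatcher.knative-eventing.svc.cluster.local"))
	if err != nil {
		panic(err)
	}
	fmt.Println(svc.Spec.Type, svc.Spec.ExternalName)
}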
-func newK8sService(c *eventingv1alpha1.Channel) *corev1.Service { +func newK8sService(c *eventingv1alpha1.Channel, opts ...k8sServiceOption) (*corev1.Service, error) { // TODO: Need to check if generated name truncates the channel name in case channel name is tool long // Add annotations - return &corev1.Service{ + svc := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ GenerateName: channelServiceName(c.ObjectMeta.Name), Namespace: c.Namespace, @@ -264,10 +281,20 @@ func newK8sService(c *eventingv1alpha1.Channel) *corev1.Service { }, }, Spec: corev1.ServiceSpec{ - Type: "ExternalName", - ExternalName: names.ServiceHostName(channelDispatcherServiceName(c.Spec.Provisioner.Name), system.Namespace()), + Ports: []corev1.ServicePort{ + { + Name: PortName, + Port: PortNumber, + }, + }, }, } + for _, opt := range opts { + if err := opt(svc); err != nil { + return nil, err + } + } + return svc, nil } // k8sOldServiceLabels returns a map with only old eventing channel and provisioner labels diff --git a/pkg/provisioners/inmemory/channel/controller.go b/pkg/provisioners/inmemory/channel/controller.go index 6edf558e411..88f0e96233f 100644 --- a/pkg/provisioners/inmemory/channel/controller.go +++ b/pkg/provisioners/inmemory/channel/controller.go @@ -18,7 +18,6 @@ package channel import ( eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" - istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3" "go.uber.org/zap" corev1 "k8s.io/api/core/v1" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -67,14 +66,5 @@ func ProvideController(mgr manager.Manager, logger *zap.Logger) (controller.Cont return nil, err } - // Watch the VirtualServices that are owned by Channels. - err = c.Watch(&source.Kind{ - Type: &istiov1alpha3.VirtualService{}, - }, &handler.EnqueueRequestForOwner{OwnerType: &eventingv1alpha1.Channel{}, IsController: true}) - if err != nil { - logger.Error("Unable to watch VirtualServices.", zap.Error(err)) - return nil, err - } - return c, nil } diff --git a/pkg/provisioners/inmemory/channel/reconcile.go b/pkg/provisioners/inmemory/channel/reconcile.go index 5db47a21063..fffbdc34c64 100644 --- a/pkg/provisioners/inmemory/channel/reconcile.go +++ b/pkg/provisioners/inmemory/channel/reconcile.go @@ -31,18 +31,14 @@ import ( util "github.com/knative/eventing/pkg/provisioners" ccpcontroller "github.com/knative/eventing/pkg/provisioners/inmemory/clusterchannelprovisioner" "github.com/knative/eventing/pkg/reconciler/names" - "github.com/knative/eventing/pkg/sidecar/fanout" - "github.com/knative/eventing/pkg/sidecar/multichannelfanout" ) const ( finalizerName = controllerAgentName // Name of the corev1.Events emitted from the reconciliation process - channelReconciled = "ChannelReconciled" - channelUpdateStatusFailed = "ChannelUpdateStatusFailed" - channelConfigSyncFailed = "ChannelConfigSyncFailed" - k8sServiceCreateFailed = "K8sServiceCreateFailed" - virtualServiceCreateFailed = "VirtualServiceCreateFailed" + channelReconciled = "ChannelReconciled" + channelUpdateStatusFailed = "ChannelUpdateStatusFailed" + k8sServiceCreateFailed = "K8sServiceCreateFailed" // TODO after in-memory-channel is retired, asyncProvisionerName should be removed defaultProvisionerName = "in-memory-channel" ) @@ -126,7 +122,7 @@ func (r *reconciler) reconcile(ctx context.Context, c *eventingv1alpha1.Channel) // 3. The configuration of all Channel subscriptions. if c.DeletionTimestamp != nil { - // K8s garbage collection will delete the K8s service and VirtualService for this channel. 
+ // K8s garbage collection will delete the K8s service for this channel. // We use a finalizer to ensure the channel config has been synced. util.RemoveFinalizer(c, finalizerName) return nil @@ -134,17 +130,12 @@ func (r *reconciler) reconcile(ctx context.Context, c *eventingv1alpha1.Channel) util.AddFinalizer(c, finalizerName) - // We use a single dispatcher for both in-memory and in-memory-channel provisioners. - // - originalProvisionerName := c.Spec.Provisioner.Name - c.Spec.Provisioner.Name = defaultProvisionerName - svc, err := util.CreateK8sService(ctx, r.client, c) + svc, err := util.CreateK8sService(ctx, r.client, c, util.ExternalService(c)) if err != nil { logger.Info("Error creating the Channel's K8s Service", zap.Error(err)) r.recorder.Eventf(c, corev1.EventTypeWarning, k8sServiceCreateFailed, "Failed to reconcile Channel's K8s Service: %v", err) return err } - c.Spec.Provisioner.Name = originalProvisionerName c.Status.SetAddress(names.ServiceHostName(svc.Name, svc.Namespace)) @@ -152,31 +143,6 @@ func (r *reconciler) reconcile(ctx context.Context, c *eventingv1alpha1.Channel) return nil } -func multiChannelFanoutConfig(channels []eventingv1alpha1.Channel) *multichannelfanout.Config { - cc := make([]multichannelfanout.ChannelConfig, 0) - for _, c := range channels { - channelConfig := multichannelfanout.ChannelConfig{ - Namespace: c.Namespace, - Name: c.Name, - } - if c.Spec.Subscribable != nil { - // TODO After in-memory-channel is retired, this logic must be refactored. - asyncHandler := false - if c.Spec.Provisioner.Name != defaultProvisionerName { - asyncHandler = true - } - channelConfig.FanoutConfig = fanout.Config{ - Subscriptions: c.Spec.Subscribable.Subscribers, - AsyncHandler: asyncHandler, - } - } - cc = append(cc, channelConfig) - } - return &multichannelfanout.Config{ - ChannelConfigs: cc, - } -} - func (r *reconciler) listAllChannels(ctx context.Context) ([]eventingv1alpha1.Channel, error) { channels := make([]eventingv1alpha1.Channel, 0) diff --git a/pkg/provisioners/inmemory/channel/reconcile_test.go b/pkg/provisioners/inmemory/channel/reconcile_test.go index 30b9b2ac27b..211f3fc5a03 100644 --- a/pkg/provisioners/inmemory/channel/reconcile_test.go +++ b/pkg/provisioners/inmemory/channel/reconcile_test.go @@ -18,27 +18,25 @@ package channel import ( "context" - "encoding/json" "errors" "fmt" "testing" - "github.com/google/go-cmp/cmp" eventingduck "github.com/knative/eventing/pkg/apis/duck/v1alpha1" eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" util "github.com/knative/eventing/pkg/provisioners" + "github.com/knative/eventing/pkg/reconciler/names" controllertesting "github.com/knative/eventing/pkg/reconciler/testing" - "github.com/knative/eventing/pkg/sidecar/configmap" "github.com/knative/eventing/pkg/sidecar/fanout" "github.com/knative/eventing/pkg/sidecar/multichannelfanout" "github.com/knative/eventing/pkg/utils" istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3" + "github.com/knative/pkg/system" _ "github.com/knative/pkg/system/testing" "go.uber.org/zap" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes/scheme" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -56,8 +54,6 @@ const ( cmName = "test-config-map" testErrorMessage = "test induced error" - - insertedByVerifyConfigMapData = "data inserted by verifyConfigMapData so that it can be WantPresent" ) var ( 
@@ -183,11 +179,9 @@ var ( // map of events to set test cases' expectations easier events = map[string]corev1.Event{ - channelReconciled: {Reason: channelReconciled, Type: corev1.EventTypeNormal}, - channelUpdateStatusFailed: {Reason: channelUpdateStatusFailed, Type: corev1.EventTypeWarning}, - channelConfigSyncFailed: {Reason: channelConfigSyncFailed, Type: corev1.EventTypeWarning}, - k8sServiceCreateFailed: {Reason: k8sServiceCreateFailed, Type: corev1.EventTypeWarning}, - virtualServiceCreateFailed: {Reason: virtualServiceCreateFailed, Type: corev1.EventTypeWarning}, + channelReconciled: {Reason: channelReconciled, Type: corev1.EventTypeNormal}, + channelUpdateStatusFailed: {Reason: channelUpdateStatusFailed, Type: corev1.EventTypeWarning}, + k8sServiceCreateFailed: {Reason: k8sServiceCreateFailed, Type: corev1.EventTypeWarning}, } ) @@ -250,23 +244,6 @@ func TestReconcile(t *testing.T) { makeChannelWithWrongProvisionerName(), }, }, - { - Name: "Channel deleted - Channel config sync fails", - InitialState: []runtime.Object{ - makeDeletingChannel(), - }, - Mocks: controllertesting.Mocks{ - MockLists: errorListingChannels(), - }, - WantPresent: []runtime.Object{ - // Finalizer has not been removed. - makeDeletingChannel(), - }, - WantErrMsg: testErrorMessage, - WantEvent: []corev1.Event{ - events[channelConfigSyncFailed], - }, - }, { Name: "Channel deleted - finalizer removed", InitialState: []runtime.Object{ @@ -279,64 +256,10 @@ func TestReconcile(t *testing.T) { events[channelReconciled], }, }, - { - Name: "Channel config sync fails - can't list Channels", - InitialState: []runtime.Object{ - makeChannel(), - }, - Mocks: controllertesting.Mocks{ - MockLists: errorListingChannels(), - }, - WantErrMsg: testErrorMessage, - WantEvent: []corev1.Event{ - events[channelConfigSyncFailed], - }, - }, - { - Name: "Channel config sync fails - can't get ConfigMap", - InitialState: []runtime.Object{ - makeChannel(), - }, - Mocks: controllertesting.Mocks{ - MockGets: errorGettingConfigMap(), - }, - WantErrMsg: testErrorMessage, - WantEvent: []corev1.Event{ - events[channelConfigSyncFailed], - }, - }, - { - Name: "Channel config sync fails - can't create ConfigMap", - InitialState: []runtime.Object{ - makeChannel(), - }, - Mocks: controllertesting.Mocks{ - MockCreates: errorCreatingConfigMap(), - }, - WantErrMsg: testErrorMessage, - WantEvent: []corev1.Event{ - events[channelConfigSyncFailed], - }, - }, - { - Name: "Channel config sync fails - can't update ConfigMap", - InitialState: []runtime.Object{ - makeChannel(), - makeConfigMap(), - }, - Mocks: controllertesting.Mocks{ - MockUpdates: errorUpdatingConfigMap(), - }, - WantErrMsg: testErrorMessage, - WantEvent: []corev1.Event{ - events[channelConfigSyncFailed], - }, - }, { Name: "K8s service get fails", InitialState: []runtime.Object{ makeChannel(), - makeConfigMap(), }, Mocks: controllertesting.Mocks{ MockLists: errorListingK8sService(), @@ -353,7 +276,6 @@ func TestReconcile(t *testing.T) { Name: "K8s service creation fails", InitialState: []runtime.Object{ makeChannel(), - makeConfigMap(), }, Mocks: controllertesting.Mocks{ MockCreates: errorCreatingK8sService(), @@ -367,54 +289,11 @@ func TestReconcile(t *testing.T) { events[k8sServiceCreateFailed], }, }, - { - Name: "Virtual service get fails", - InitialState: []runtime.Object{ - makeChannel(), - makeConfigMap(), - makeK8sService(), - makeVirtualService(), - }, - Mocks: controllertesting.Mocks{ - MockLists: errorListingVirtualService(), - }, - WantPresent: []runtime.Object{ - // TODO: This 
should have a useful error message saying that the VirtualService - // failed. - makeChannelWithFinalizerAndAddress(), - }, - WantErrMsg: testErrorMessage, - WantEvent: []corev1.Event{ - events[virtualServiceCreateFailed], - }, - }, - { - Name: "Virtual service creation fails", - InitialState: []runtime.Object{ - makeChannel(), - makeConfigMap(), - makeK8sService(), - }, - Mocks: controllertesting.Mocks{ - MockCreates: errorCreatingVirtualService(), - }, - WantPresent: []runtime.Object{ - // TODO: This should have a useful error message saying that the VirtualService - // failed. - makeChannelWithFinalizerAndAddress(), - }, - WantErrMsg: testErrorMessage, - WantEvent: []corev1.Event{ - events[virtualServiceCreateFailed], - }, - }, { Name: "Channel get for update fails", InitialState: []runtime.Object{ makeChannel(), - makeConfigMap(), makeK8sService(), - makeVirtualService(), }, Mocks: controllertesting.Mocks{ MockGets: errorOnSecondChannelGet(), @@ -428,9 +307,7 @@ func TestReconcile(t *testing.T) { Name: "Channel update fails", InitialState: []runtime.Object{ makeChannel(), - makeConfigMap(), makeK8sService(), - makeVirtualService(), }, Mocks: controllertesting.Mocks{ MockUpdates: errorUpdatingChannel(), @@ -443,9 +320,7 @@ func TestReconcile(t *testing.T) { Name: "Channel status update fails", InitialState: []runtime.Object{ makeChannel(), - makeConfigMap(), makeK8sService(), - makeVirtualService(), }, Mocks: controllertesting.Mocks{ MockStatusUpdates: errorUpdatingChannelStatus(), @@ -454,83 +329,14 @@ func TestReconcile(t *testing.T) { WantEvent: []corev1.Event{ events[channelReconciled], events[channelUpdateStatusFailed], }, - }, { - Name: "Channel reconcile successful - Channel list follows pagination", - InitialState: []runtime.Object{ - makeChannel(), - makeConfigMap(), - }, - Mocks: controllertesting.Mocks{ - MockLists: (&paginatedChannelsListStruct{channels: channels}).MockLists(), - // This is more accurate to be in WantPresent, but we need to check JSON equality, - // not string equality, so it can't be done in WantPresent. Instead, we verify - // during the update call, swapping out the data and WantPresent with that inserted - // data. - MockUpdates: verifyConfigMapData(channelsConfig), - }, - WantPresent: []runtime.Object{ - makeReadyChannel(), - makeK8sService(), - makeVirtualService(), - makeConfigMapWithVerifyConfigMapData(), - }, - WantEvent: []corev1.Event{ - events[channelReconciled], - }, - }, - { - Name: "Channel reconcile successful - Channel has no subscribers", - InitialState: []runtime.Object{ - makeChannel(), - makeConfigMap(), - }, - Mocks: controllertesting.Mocks{ - MockLists: (&paginatedChannelsListStruct{channels: []eventingv1alpha1.Channel{ - { - ObjectMeta: metav1.ObjectMeta{ - Namespace: "high-consul", - Name: "duarte", - }, - Spec: eventingv1alpha1.ChannelSpec{ - Provisioner: &corev1.ObjectReference{ - Name: ccpName, - }, - }, - }, - }}).MockLists(), - // This is more accurate to be in WantPresent, but we need to check JSON equality, - // not string equality, so it can't be done in WantPresent. Instead, we verify - // during the update call, swapping out the data and WantPresent with that inserted - // data. 
- MockUpdates: verifyConfigMapData(multichannelfanout.Config{ - ChannelConfigs: []multichannelfanout.ChannelConfig{ - { - Namespace: "high-consul", - Name: "duarte", - }, - }, - }), - }, - WantPresent: []runtime.Object{ - makeReadyChannel(), - makeK8sService(), - makeVirtualService(), - makeConfigMapWithVerifyConfigMapData(), - }, - WantEvent: []corev1.Event{ - events[channelReconciled], - }, }, { Name: "Channel reconcile successful - Async channel", - // VirtualService should have channel provisioner name - // defaults to in-memory-channel but the service should match provisioner's service name InitialState: []runtime.Object{ makeChannel("in-memory"), }, Mocks: controllertesting.Mocks{}, WantPresent: []runtime.Object{ - makeVirtualService(), makeK8sService("in-memory"), }, WantEvent: []corev1.Event{ @@ -539,14 +345,11 @@ func TestReconcile(t *testing.T) { }, { Name: "Channel reconcile successful - Non Async channel", - // VirtualService should have channel provisioner name - // defaults to in-memory-channel InitialState: []runtime.Object{ makeChannel(), }, Mocks: controllertesting.Mocks{}, WantPresent: []runtime.Object{ - makeVirtualService(), makeK8sService(), }, WantEvent: []corev1.Event{ @@ -556,17 +359,12 @@ func TestReconcile(t *testing.T) { } for _, tc := range testCases { - configMapKey := types.NamespacedName{ - Namespace: cmNamespace, - Name: cmName, - } c := tc.GetClient() recorder := tc.GetEventRecorder() r := &reconciler{ - client: c, - recorder: recorder, - logger: zap.NewNop(), - configMapKey: configMapKey, + client: c, + recorder: recorder, + logger: zap.NewNop(), } if tc.ReconcileKey == "" { tc.ReconcileKey = fmt.Sprintf("/%s", cName) @@ -656,26 +454,6 @@ func makeDeletingChannelWithoutFinalizer() *eventingv1alpha1.Channel { return c } -func makeConfigMap() *corev1.ConfigMap { - return &corev1.ConfigMap{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "v1", - Kind: "ConfigMap", - }, - ObjectMeta: metav1.ObjectMeta{ - Namespace: cmNamespace, - Name: cmName, - }, - } -} - -func makeConfigMapWithVerifyConfigMapData() *corev1.ConfigMap { - cm := makeConfigMap() - cm.Data = map[string]string{} - cm.Data[configmap.MultiChannelFanoutConfigKey] = insertedByVerifyConfigMapData - return cm -} - func makeK8sService(pn ...string) *corev1.Service { return &corev1.Service{ TypeMeta: metav1.TypeMeta{ @@ -703,60 +481,8 @@ func makeK8sService(pn ...string) *corev1.Service { }, }, Spec: corev1.ServiceSpec{ - Ports: []corev1.ServicePort{ - { - Name: util.PortName, - Port: util.PortNumber, - }, - }, - }, - } -} - -func makeVirtualService() *istiov1alpha3.VirtualService { - return &istiov1alpha3.VirtualService{ - TypeMeta: metav1.TypeMeta{ - APIVersion: istiov1alpha3.SchemeGroupVersion.String(), - Kind: "VirtualService", - }, - ObjectMeta: metav1.ObjectMeta{ - GenerateName: fmt.Sprintf("%s-channel-", cName), - Namespace: cNamespace, - Labels: map[string]string{ - util.EventingChannelLabel: cName, - util.OldEventingChannelLabel: cName, - util.EventingProvisionerLabel: ccpName, - util.OldEventingProvisionerLabel: ccpName, - }, - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: eventingv1alpha1.SchemeGroupVersion.String(), - Kind: "Channel", - Name: cName, - UID: cUID, - Controller: &truePointer, - BlockOwnerDeletion: &truePointer, - }, - }, - }, - Spec: istiov1alpha3.VirtualServiceSpec{ - Hosts: []string{ - serviceAddress, - fmt.Sprintf("%s.%s.channels.%s", cName, cNamespace, utils.GetClusterDomainName()), - }, - HTTP: []istiov1alpha3.HTTPRoute{{ - Rewrite: &istiov1alpha3.HTTPRewrite{ - 
Authority: fmt.Sprintf("%s.%s.channels.%s", cName, cNamespace, utils.GetClusterDomainName()), - }, - Route: []istiov1alpha3.DestinationWeight{{ - Destination: istiov1alpha3.Destination{ - Host: "in-memory-channel-dispatcher.knative-testing.svc." + utils.GetClusterDomainName(), - Port: istiov1alpha3.PortSelector{ - Number: util.PortNumber, - }, - }}, - }}, - }, + ExternalName: names.ServiceHostName(fmt.Sprintf("%s-dispatcher", getProvisionerName(pn)), system.Namespace()), + Type: "ExternalName", }, } } @@ -780,18 +506,6 @@ func errorGettingChannel() []controllertesting.MockGet { }, } } - -func errorGettingConfigMap() []controllertesting.MockGet { - return []controllertesting.MockGet{ - func(_ client.Client, _ context.Context, _ client.ObjectKey, obj runtime.Object) (controllertesting.MockHandled, error) { - if _, ok := obj.(*corev1.ConfigMap); ok { - return controllertesting.Handled, errors.New(testErrorMessage) - } - return controllertesting.Unhandled, nil - }, - } -} - func errorListingK8sService() []controllertesting.MockList { return []controllertesting.MockList{ func(_ client.Client, _ context.Context, _ *client.ListOptions, obj runtime.Object) (controllertesting.MockHandled, error) { @@ -803,17 +517,6 @@ func errorListingK8sService() []controllertesting.MockList { } } -func errorListingVirtualService() []controllertesting.MockList { - return []controllertesting.MockList{ - func(_ client.Client, _ context.Context, _ *client.ListOptions, obj runtime.Object) (controllertesting.MockHandled, error) { - if _, ok := obj.(*istiov1alpha3.VirtualServiceList); ok { - return controllertesting.Handled, errors.New(testErrorMessage) - } - return controllertesting.Unhandled, nil - }, - } -} - func errorListingChannels() []controllertesting.MockList { return []controllertesting.MockList{ func(client.Client, context.Context, *client.ListOptions, runtime.Object) (controllertesting.MockHandled, error) { @@ -822,17 +525,6 @@ func errorListingChannels() []controllertesting.MockList { } } -func errorCreatingConfigMap() []controllertesting.MockCreate { - return []controllertesting.MockCreate{ - func(_ client.Client, _ context.Context, obj runtime.Object) (controllertesting.MockHandled, error) { - if _, ok := obj.(*corev1.ConfigMap); ok { - return controllertesting.Handled, errors.New(testErrorMessage) - } - return controllertesting.Unhandled, nil - }, - } -} - func errorCreatingK8sService() []controllertesting.MockCreate { return []controllertesting.MockCreate{ func(_ client.Client, _ context.Context, obj runtime.Object) (controllertesting.MockHandled, error) { @@ -844,17 +536,6 @@ func errorCreatingK8sService() []controllertesting.MockCreate { } } -func errorCreatingVirtualService() []controllertesting.MockCreate { - return []controllertesting.MockCreate{ - func(_ client.Client, _ context.Context, obj runtime.Object) (controllertesting.MockHandled, error) { - if _, ok := obj.(*istiov1alpha3.VirtualService); ok { - return controllertesting.Handled, errors.New(testErrorMessage) - } - return controllertesting.Unhandled, nil - }, - } -} - func errorUpdatingChannel() []controllertesting.MockUpdate { return []controllertesting.MockUpdate{ func(_ client.Client, _ context.Context, obj runtime.Object) (controllertesting.MockHandled, error) { @@ -877,17 +558,6 @@ func errorUpdatingChannelStatus() []controllertesting.MockStatusUpdate { } } -func errorUpdatingConfigMap() []controllertesting.MockUpdate { - return []controllertesting.MockUpdate{ - func(_ client.Client, _ context.Context, obj runtime.Object) 
(controllertesting.MockHandled, error) { - if _, ok := obj.(*corev1.ConfigMap); ok { - return controllertesting.Handled, errors.New(testErrorMessage) - } - return controllertesting.Unhandled, nil - }, - } -} - type paginatedChannelsListStruct struct { channels []eventingv1alpha1.Channel } @@ -911,28 +581,3 @@ func (p *paginatedChannelsListStruct) MockLists() []controllertesting.MockList { }, } } - -func verifyConfigMapData(expected multichannelfanout.Config) []controllertesting.MockUpdate { - return []controllertesting.MockUpdate{ - func(innerClient client.Client, ctx context.Context, obj runtime.Object) (controllertesting.MockHandled, error) { - if cm, ok := obj.(*corev1.ConfigMap); ok { - s := cm.Data[configmap.MultiChannelFanoutConfigKey] - c := multichannelfanout.Config{} - err := json.Unmarshal([]byte(s), &c) - if err != nil { - return controllertesting.Handled, - fmt.Errorf("test is unable to unmarshal ConfigMap data: %v", err) - } - if diff := cmp.Diff(c, expected); diff != "" { - return controllertesting.Handled, - fmt.Errorf("test got unwanted ChannelsConfig (-want +got) %s", diff) - } - // Verified it is correct, now so that we can verify this actually occurred, swap - // out the data with a known value for later comparison. - cm.Data[configmap.MultiChannelFanoutConfigKey] = insertedByVerifyConfigMapData - return controllertesting.Handled, innerClient.Update(ctx, obj) - } - return controllertesting.Unhandled, nil - }, - } -} diff --git a/pkg/provisioners/inmemory/clusterchannelprovisioner/reconcile.go b/pkg/provisioners/inmemory/clusterchannelprovisioner/reconcile.go index 5e79fc3c802..5794b2dbafc 100644 --- a/pkg/provisioners/inmemory/clusterchannelprovisioner/reconcile.go +++ b/pkg/provisioners/inmemory/clusterchannelprovisioner/reconcile.go @@ -22,6 +22,7 @@ import ( "go.uber.org/zap" corev1 "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -153,7 +154,7 @@ func (r *reconciler) reconcile(ctx context.Context, ccp *eventingv1alpha1.Cluste return nil } - svc, err := util.CreateDispatcherService(ctx, r.client, ccp) + svc, err := util.CreateDispatcherService(ctx, r.client, ccp, setDispatcherServiceSelector()) if err != nil { logger.Info("Error creating the ClusterChannelProvisioner's K8s Service", zap.Error(err)) @@ -179,6 +180,15 @@ func (r *reconciler) reconcile(ctx context.Context, ccp *eventingv1alpha1.Cluste return nil } +// Since there are two provisioners "in-memry" and "in-memory-channel" but one single dispatcher service deployment, +// update the label of the K8s service to always point at the same dispatcher service deployment +func setDispatcherServiceSelector() util.ServiceOption { + return func(svc *v1.Service) error { + svc.Spec.Selector = util.DispatcherLabels("in-memory-channel") + return nil + } +} + func (r *reconciler) deleteOldDispatcherService(ctx context.Context, ccp *eventingv1alpha1.ClusterChannelProvisioner) error { svcName := fmt.Sprintf("%s-clusterbus", ccp.Name) svcKey := types.NamespacedName{ diff --git a/pkg/provisioners/inmemory/clusterchannelprovisioner/reconcile_test.go b/pkg/provisioners/inmemory/clusterchannelprovisioner/reconcile_test.go index e4ff44abb9b..036e1424235 100644 --- a/pkg/provisioners/inmemory/clusterchannelprovisioner/reconcile_test.go +++ b/pkg/provisioners/inmemory/clusterchannelprovisioner/reconcile_test.go @@ -40,10 +40,11 @@ import ( ) const ( - ccpUID = "test-uid" - testErrorMessage = "test-induced-error" - 
testNS = "test-ns" - Name = "in-memory-channel" + ccpUID = "test-uid" + testErrorMessage = "test-induced-error" + testNS = "test-ns" + inMemoryChannelName = "in-memory-channel" + inMemoryName = "in-memory" ) var ( @@ -96,7 +97,7 @@ func TestIsControlled(t *testing.T) { "wrong namespace": { ref: &corev1.ObjectReference{ Namespace: "other", - Name: Name, + Name: inMemoryName, }, isControlled: false, }, @@ -108,7 +109,7 @@ func TestIsControlled(t *testing.T) { }, "is controlled": { ref: &corev1.ObjectReference{ - Name: Name, + Name: inMemoryName, }, isControlled: true, }, @@ -143,7 +144,7 @@ func TestReconcile(t *testing.T) { &eventingv1alpha1.ClusterChannelProvisioner{ ObjectMeta: metav1.ObjectMeta{ Namespace: "not empty string", - Name: Name, + Name: inMemoryName, }, }, }, @@ -240,6 +241,20 @@ func TestReconcile(t *testing.T) { events[ccpReconciled], }, }, + { + Name: "Create dispatcher succeeds - in-memory-Channel", + ReconcileKey: inMemoryChannelName, + InitialState: []runtime.Object{ + makeClusterChannelProvisionerOld(), + }, + WantPresent: []runtime.Object{ + makeReadyClusterChannelProvisionerOld(), + makeK8sServiceOld(), + }, + WantEvent: []corev1.Event{ + events[ccpReconciled], + }, + }, { Name: "Create dispatcher succeeds - request is namespace-scoped", InitialState: []runtime.Object{ @@ -249,7 +264,7 @@ func TestReconcile(t *testing.T) { makeReadyClusterChannelProvisioner(), makeK8sService(), }, - ReconcileKey: fmt.Sprintf("%s/%s", testNS, Name), + ReconcileKey: fmt.Sprintf("%s/%s", testNS, inMemoryName), WantEvent: []corev1.Event{ events[ccpReconciled], }, @@ -297,13 +312,19 @@ func TestReconcile(t *testing.T) { logger: zap.NewNop(), } if tc.ReconcileKey == "" { - tc.ReconcileKey = fmt.Sprintf("/%s", Name) + tc.ReconcileKey = fmt.Sprintf("/%s", inMemoryName) } tc.IgnoreTimes = true t.Run(tc.Name, tc.Runner(t, r, c, recorder)) } } +func makeClusterChannelProvisionerOld() *eventingv1alpha1.ClusterChannelProvisioner { + ccp := makeClusterChannelProvisioner() + ccp.SetName(inMemoryChannelName) + return ccp +} + func makeClusterChannelProvisioner() *eventingv1alpha1.ClusterChannelProvisioner { return &eventingv1alpha1.ClusterChannelProvisioner{ TypeMeta: metav1.TypeMeta{ @@ -311,7 +332,7 @@ func makeClusterChannelProvisioner() *eventingv1alpha1.ClusterChannelProvisioner Kind: "ClusterChannelProvisioner", }, ObjectMeta: metav1.ObjectMeta{ - Name: Name, + Name: inMemoryName, UID: ccpUID, }, Spec: eventingv1alpha1.ClusterChannelProvisionerSpec{}, @@ -328,6 +349,12 @@ func makeReadyClusterChannelProvisioner() *eventingv1alpha1.ClusterChannelProvis return ccp } +func makeReadyClusterChannelProvisionerOld() *eventingv1alpha1.ClusterChannelProvisioner { + ccp := makeReadyClusterChannelProvisioner() + ccp.Name = inMemoryChannelName + return ccp +} + func makeDeletingClusterChannelProvisioner() *eventingv1alpha1.ClusterChannelProvisioner { ccp := makeClusterChannelProvisioner() ccp.DeletionTimestamp = &deletionTime @@ -342,21 +369,21 @@ func makeK8sService() *corev1.Service { }, ObjectMeta: metav1.ObjectMeta{ Namespace: system.Namespace(), - Name: fmt.Sprintf("%s-dispatcher", Name), + Name: fmt.Sprintf("%s-dispatcher", inMemoryName), OwnerReferences: []metav1.OwnerReference{ { APIVersion: eventingv1alpha1.SchemeGroupVersion.String(), Kind: "ClusterChannelProvisioner", - Name: Name, + Name: inMemoryName, UID: ccpUID, Controller: &truePointer, BlockOwnerDeletion: &truePointer, }, }, - Labels: util.DispatcherLabels(Name), + Labels: util.DispatcherLabels(inMemoryName), }, Spec: corev1.ServiceSpec{ - 
Selector: util.DispatcherLabels(Name), + Selector: util.DispatcherLabels(inMemoryChannelName), Ports: []corev1.ServicePort{ { Name: "http", @@ -368,9 +395,17 @@ func makeK8sService() *corev1.Service { } } +func makeK8sServiceOld() *corev1.Service { + svc := makeK8sService() + svc.SetName(fmt.Sprintf("%s-dispatcher", inMemoryChannelName)) + svc.GetOwnerReferences()[0].Name = inMemoryChannelName + svc.SetLabels(util.DispatcherLabels(inMemoryChannelName)) + return svc +} + func makeOldK8sService() *corev1.Service { svc := makeK8sService() - svc.ObjectMeta.Name = fmt.Sprintf("%s-clusterbus", Name) + svc.ObjectMeta.Name = fmt.Sprintf("%s-clusterbus", inMemoryName) return svc } diff --git a/pkg/provisioners/provisioner_util.go b/pkg/provisioners/provisioner_util.go index 4afe9d4aea0..7003250345c 100644 --- a/pkg/provisioners/provisioner_util.go +++ b/pkg/provisioners/provisioner_util.go @@ -5,6 +5,7 @@ import ( "go.uber.org/zap" corev1 "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/equality" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" @@ -19,7 +20,10 @@ import ( "github.com/knative/pkg/system" ) -func CreateDispatcherService(ctx context.Context, client runtimeClient.Client, ccp *eventingv1alpha1.ClusterChannelProvisioner) (*corev1.Service, error) { +// ServiceOption can be used to optionally modify the K8s default that gets created for the Dispatcher in CreateDispatcherService +type ServiceOption func(*v1.Service) error + +func CreateDispatcherService(ctx context.Context, client runtimeClient.Client, ccp *eventingv1alpha1.ClusterChannelProvisioner, opts ...ServiceOption) (*corev1.Service, error) { svcKey := types.NamespacedName{ Namespace: system.Namespace(), Name: channelDispatcherServiceName(ccp.Name), @@ -29,7 +33,12 @@ func CreateDispatcherService(ctx context.Context, client runtimeClient.Client, c err := client.Get(ctx, svcKey, svc) return svc, err } - return createK8sService(ctx, client, getSvc, newDispatcherService(ccp)) + svc, err := newDispatcherService(ccp, opts...) + if err != nil { + return nil, err + } + + return createK8sService(ctx, client, getSvc, svc) } func UpdateClusterChannelProvisionerStatus(ctx context.Context, client runtimeClient.Client, u *eventingv1alpha1.ClusterChannelProvisioner) error { @@ -50,9 +59,9 @@ func UpdateClusterChannelProvisionerStatus(ctx context.Context, client runtimeCl // newDispatcherService creates a new Service for a ClusterChannelProvisioner resource. It also sets // the appropriate OwnerReferences on the resource so handleObject can discover // the ClusterChannelProvisioner resource that 'owns' it. 
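Relatedly, the setDispatcherServiceSelector option earlier in this patch exists because both provisioner names ("in-memory" and "in-memory-channel") must resolve to one dispatcher deployment. A standalone sketch of that selector sharing follows; the label keys and values are hypothetical stand-ins for util.DispatcherLabels, whose exact contents are not shown in this patch:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// dispatcherLabels is an illustrative stand-in for util.DispatcherLabels.
func dispatcherLabels(provisioner string) map[string]string {
	return map[string]string{"clusterChannelProvisioner": provisioner, "role": "dispatcher"}
}

// dispatcherService builds a per-provisioner Service whose selector always
// targets the single dispatcher deployment labeled for "in-memory-channel".
func dispatcherService(provisioner string) *corev1.Service {
	return &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:   provisioner + "-dispatcher",
			Labels: dispatcherLabels(provisioner),
		},
		Spec: corev1.ServiceSpec{
			Selector: dispatcherLabels("in-memory-channel"),
			Ports:    []corev1.ServicePort{{Name: "http", Port: 80}},
		},
	}
}

func main() {
	for _, p := range []string{"in-memory", "in-memory-channel"} {
		svc := dispatcherService(p)
		fmt.Println(svc.Name, "->", svc.Spec.Selector)
	}
}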
-func newDispatcherService(ccp *eventingv1alpha1.ClusterChannelProvisioner) *corev1.Service { +func newDispatcherService(ccp *eventingv1alpha1.ClusterChannelProvisioner, opts ...ServiceOption) (*corev1.Service, error) { labels := DispatcherLabels(ccp.Name) - return &corev1.Service{ + svc := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: channelDispatcherServiceName(ccp.Name), Namespace: system.Namespace(), @@ -76,6 +85,13 @@ func newDispatcherService(ccp *eventingv1alpha1.ClusterChannelProvisioner) *core }, }, } + + for _, opt := range opts { + if err := opt(svc); err != nil { + return svc, err + } + } + return svc, nil } func DispatcherLabels(ccpName string) map[string]string { diff --git a/pkg/sidecar/configmap/filesystem/filesystem_watcher.go b/pkg/sidecar/configmap/filesystem/filesystem_watcher.go new file mode 100644 index 00000000000..12f5042d51e --- /dev/null +++ b/pkg/sidecar/configmap/filesystem/filesystem_watcher.go @@ -0,0 +1,126 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package filesystem + +import ( + "errors" + + "github.com/fsnotify/fsnotify" + sidecarconfigmap "github.com/knative/eventing/pkg/sidecar/configmap" + "github.com/knative/eventing/pkg/sidecar/multichannelfanout" + "github.com/knative/eventing/pkg/sidecar/swappable" + "github.com/knative/pkg/configmap" + "go.uber.org/zap" +) + +const ( + // ConfigDir is the mount path of the configMap volume. + ConfigDir = "/etc/config/fanout_sidecar" +) + +// Monitors an attached ConfigMap volume for updated configuration and calls `configUpdated` when +// the value changes. +type ConfigMapWatcher struct { + logger *zap.Logger + // The directory to read the configMap from. + dir string + // Stop the watcher by closing this channel. + watcherStopCh chan<- bool + + // The function to call when the configuration is updated. + configUpdated swappable.UpdateConfig +} + +// NewConfigMapWatcher creates a new filesystem.ConfigMapWatcher. The caller is responsible for +// calling Start(<-chan), likely via a controller-runtime Manager. +func NewConfigMapWatcher(logger *zap.Logger, dir string, updateConfig swappable.UpdateConfig) (*ConfigMapWatcher, error) { + conf, err := readConfigMap(logger, dir) + if err != nil { + logger.Error("Unable to read configMap", zap.Error(err)) + return nil, err + } + + logger.Info("Read initial configMap", zap.Any("conf", conf)) + + err = updateConfig(conf) + if err != nil { + logger.Error("Unable to use the initial configMap: %v", zap.Error(err)) + return nil, err + } + + cmw := &ConfigMapWatcher{ + logger: logger, + dir: dir, + configUpdated: updateConfig, + } + return cmw, nil +} + +// readConfigMap attempts to read the configMap from the attached volume. +func readConfigMap(logger *zap.Logger, dir string) (*multichannelfanout.Config, error) { + cm, err := configmap.Load(dir) + if err != nil { + return nil, err + } + return sidecarconfigmap.NewFanoutConfig(logger, cm) +} + +// updateConfig reads the configMap data and calls `configUpdated` with the updated value. 
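For readers unfamiliar with fsnotify, the watcher's Start loop further below boils down to the following standalone sketch; the watched directory (the sidecar's ConfigDir) and the logging choices here are illustrative:

package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer watcher.Close()

	// Watch the mounted ConfigMap volume directory.
	if err := watcher.Add("/etc/config/fanout_sidecar"); err != nil {
		log.Fatal(err)
	}

	for {
		select {
		case ev, ok := <-watcher.Events:
			if !ok {
				return // Events channel closed.
			}
			// Any event on the volume triggers a re-read of the configuration.
			log.Println("event:", ev)
		case werr, ok := <-watcher.Errors:
			if !ok {
				return // Errors channel closed.
			}
			log.Println("watch error:", werr)
		}
	}
}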
+func (cmw *ConfigMapWatcher) updateConfig() { + conf, err := readConfigMap(cmw.logger, cmw.dir) + if err != nil { + cmw.logger.Error("Unable to read the configMap", zap.Error(err)) + return + } + err = cmw.configUpdated(conf) + if err != nil { + cmw.logger.Error("Unable to update config", zap.Error(err)) + return + } +} + +// Start implements controller runtime's manager.Runnable. +func (cmw *ConfigMapWatcher) Start(stopCh <-chan struct{}) error { + watcher, err := fsnotify.NewWatcher() + if err != nil { + return err + } + + err = watcher.Add(cmw.dir) + if err != nil { + return err + } + + for { + select { + case _, ok := <-watcher.Events: + if !ok { + // Channel closed. + return errors.New("watcher.Events channel closed") + } + cmw.updateConfig() + case e, ok := <-watcher.Errors: + if !ok { + // Channel closed. + return errors.New("watcher.Errors channel closed") + } + cmw.logger.Error("watcher.Errors", zap.Error(e)) + case <-stopCh: + return watcher.Close() + } + } +} diff --git a/pkg/sidecar/configmap/filesystem/filesystem_watcher_test.go b/pkg/sidecar/configmap/filesystem/filesystem_watcher_test.go new file mode 100644 index 00000000000..84a0ac83912 --- /dev/null +++ b/pkg/sidecar/configmap/filesystem/filesystem_watcher_test.go @@ -0,0 +1,379 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package filesystem + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "strings" + "sync" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + eventingduck "github.com/knative/eventing/pkg/apis/duck/v1alpha1" + "github.com/knative/eventing/pkg/sidecar/configmap" + "github.com/knative/eventing/pkg/sidecar/fanout" + "github.com/knative/eventing/pkg/sidecar/multichannelfanout" + "github.com/knative/eventing/pkg/utils" + "go.uber.org/zap" + yaml "gopkg.in/yaml.v2" +) + +func TestReadConfigMap(t *testing.T) { + testCases := []struct { + name string + createDir bool + config string + expected *multichannelfanout.Config + expectedErr bool + }{ + { + name: "dir does not exist", + createDir: false, + }, + { + name: "no data", + createDir: true, + expectedErr: true, + }, + { + name: "invalid YAML", + createDir: true, + config: ` + key: + - value + - different indent level + `, + expectedErr: true, + }, + { + name: "valid YAML -- invalid JSON", + config: "{ nil: Key }", + createDir: true, + expectedErr: true, + }, + { + name: "unknown field", + config: "{ channelConfigs: [ { not: a-defined-field } ] }", + createDir: true, + expectedErr: true, + }, + { + name: "valid", + createDir: true, + config: ` + channelConfigs: + - namespace: default + name: c1 + fanoutConfig: + subscriptions: + - subscriberURI: event-changer.default.svc.` + utils.GetClusterDomainName() + ` + replyURI: message-dumper-bar.default.svc.` + utils.GetClusterDomainName() + ` + - subscriberURI: message-dumper-foo.default.svc.` + utils.GetClusterDomainName() + ` + - replyURI: message-dumper-bar.default.svc.` + utils.GetClusterDomainName() + ` + - namespace: default + name: c2 + fanoutConfig: + subscriptions: + - replyURI: message-dumper-foo.default.svc.` + utils.GetClusterDomainName() + ` + - namespace: other + name: c3 + fanoutConfig: + subscriptions: + - replyURI: message-dumper-foo.default.svc.` + utils.GetClusterDomainName(), + expected: &multichannelfanout.Config{ + ChannelConfigs: []multichannelfanout.ChannelConfig{ + { + Namespace: "default", + Name: "c1", + FanoutConfig: fanout.Config{ + Subscriptions: []eventingduck.ChannelSubscriberSpec{ + { + SubscriberURI: "event-changer.default.svc." + utils.GetClusterDomainName(), + ReplyURI: "message-dumper-bar.default.svc." + utils.GetClusterDomainName(), + }, + { + SubscriberURI: "message-dumper-foo.default.svc." + utils.GetClusterDomainName(), + }, + { + ReplyURI: "message-dumper-bar.default.svc." + utils.GetClusterDomainName(), + }, + }, + }, + }, + { + Namespace: "default", + Name: "c2", + FanoutConfig: fanout.Config{ + Subscriptions: []eventingduck.ChannelSubscriberSpec{ + { + ReplyURI: "message-dumper-foo.default.svc." + utils.GetClusterDomainName(), + }, + }, + }, + }, + { + Namespace: "other", + Name: "c3", + FanoutConfig: fanout.Config{ + Subscriptions: []eventingduck.ChannelSubscriberSpec{ + { + ReplyURI: "message-dumper-foo.default.svc." + utils.GetClusterDomainName(), + }, + }, + }, + }, + }, + }, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + var dir string + if tc.createDir { + var cleanup func() + dir, cleanup = createTempDir(t) + defer cleanup() + } else { + dir = "/tmp/doesNotExist" + } + writeConfigString(t, dir, tc.config) + c, e := readConfigMap(zap.NewNop(), dir) + if tc.expectedErr { + if e == nil { + t.Errorf("Expected an error, actual nil") + } + return + } + if !cmp.Equal(c, tc.expected) { + t.Errorf("Unexpected config. Expected '%v'. 
Actual '%v'.", tc.expected, c) + } + }) + } +} + +func TestWatch(t *testing.T) { + testCases := map[string]struct { + initialConfigErr error + initialConfig *multichannelfanout.Config + updateConfigErr error + updateConfig *multichannelfanout.Config + }{ + "error applying initial config": { + initialConfig: &multichannelfanout.Config{}, + initialConfigErr: errors.New("test-induced error"), + }, + "read initial config": { + initialConfig: &multichannelfanout.Config{ + ChannelConfigs: []multichannelfanout.ChannelConfig{ + { + Namespace: "default", + Name: "c1", + FanoutConfig: fanout.Config{ + Subscriptions: []eventingduck.ChannelSubscriberSpec{ + { + ReplyURI: "foo.bar", + }, + }, + }, + }, + }, + }, + }, + "error apply updated config": { + initialConfig: &multichannelfanout.Config{ + ChannelConfigs: []multichannelfanout.ChannelConfig{ + { + Namespace: "default", + Name: "c1", + FanoutConfig: fanout.Config{ + Subscriptions: []eventingduck.ChannelSubscriberSpec{ + { + ReplyURI: "foo.bar", + }, + }, + }, + }, + }, + }, + updateConfigErr: errors.New("test-induced error"), + }, + "update config": { + initialConfig: &multichannelfanout.Config{ + ChannelConfigs: []multichannelfanout.ChannelConfig{ + { + Namespace: "default", + Name: "c1", + FanoutConfig: fanout.Config{ + Subscriptions: []eventingduck.ChannelSubscriberSpec{ + { + ReplyURI: "foo.bar", + }, + }, + }, + }, + }, + }, + updateConfig: &multichannelfanout.Config{ + ChannelConfigs: []multichannelfanout.ChannelConfig{ + { + Namespace: "default", + Name: "new-channel", + FanoutConfig: fanout.Config{ + Subscriptions: []eventingduck.ChannelSubscriberSpec{ + { + SubscriberURI: "baz.qux", + }, + }, + }, + }, + }, + }, + }, + } + for n, tc := range testCases { + t.Run(n, func(t *testing.T) { + dir, cleanup := createTempDir(t) + defer cleanup() + writeConfig(t, dir, tc.initialConfig) + + cuc := &configUpdatedChecker{ + updateConfigErr: tc.initialConfigErr, + } + cmw, err := NewConfigMapWatcher(zap.NewNop(), dir, cuc.updateConfig) + if err != nil { + if tc.initialConfigErr != err { + t.Errorf("Unexpected error making ConfigMapWatcher. Expected: '%v'. Actual '%v'", tc.initialConfigErr, err) + } + return + } + ac := cuc.getConfig() + if !cmp.Equal(tc.initialConfig, ac) { + t.Errorf("Unexpected initial config. Expected '%v'. Actual '%v'", tc.initialConfig, ac) + } + + stopCh := make(chan struct{}) + go func() { + _ = cmw.Start(stopCh) + }() + defer func() { + close(stopCh) + }() + // Sadly, the test is flaky unless we sleep here, waiting for the file system + // watcher to truly start. + time.Sleep(100 * time.Millisecond) + + if tc.updateConfigErr != nil { + cuc.updateConfigErr = tc.updateConfigErr + } + + expected := tc.initialConfig + if tc.updateConfig != nil { + expected = tc.updateConfig + } + + cuc.updateCalled = make(chan struct{}, 1) + writeConfig(t, dir, expected) + // The watcher is running in another goroutine, give it some time to notice the + // change. + select { + case <-cuc.updateCalled: + break + case <-time.After(5 * time.Second): + t.Errorf("Time out waiting for watcher to notice change.") + } + + ac = cuc.getConfig() + if !cmp.Equal(ac, expected) { + t.Errorf("Unexpected update config. Expected '%v'. 
Actual '%v'", expected, ac) + } + }) + } +} + +type configUpdatedChecker struct { + configLock sync.Mutex + config *multichannelfanout.Config + updateCalled chan struct{} + updateConfigErr error +} + +func (cuc *configUpdatedChecker) updateConfig(config *multichannelfanout.Config) error { + cuc.configLock.Lock() + defer cuc.configLock.Unlock() + cuc.config = config + if cuc.updateCalled != nil { + cuc.updateCalled <- struct{}{} + } + return cuc.updateConfigErr +} + +func (cuc *configUpdatedChecker) getConfig() *multichannelfanout.Config { + cuc.configLock.Lock() + defer cuc.configLock.Unlock() + return cuc.config +} + +func createTempDir(t *testing.T) (string, func()) { + dir, err := ioutil.TempDir("", "configMapHandlerTest") + if err != nil { + t.Errorf("Unable to make temp directory: %v", err) + } + return dir, func() { + _ = os.RemoveAll(dir) + } +} + +func writeConfig(t *testing.T, dir string, config *multichannelfanout.Config) { + if config != nil { + yb, err := yaml.Marshal(config) + if err != nil { + t.Errorf("Unable to marshal the config") + } + writeConfigString(t, dir, string(yb)) + } +} + +func writeConfigString(t *testing.T, dir, config string) { + if config != "" { + // Golang editors tend to replace leading spaces with tabs. YAML is left whitespace + // sensitive, so let's replace the tabs with spaces. + leftSpaceConfig := strings.Replace(config, "\t", " ", -1) + err := atomicWriteFile(t, fmt.Sprintf("%s/%s", dir, configmap.MultiChannelFanoutConfigKey), []byte(leftSpaceConfig), 0700) + if err != nil { + t.Errorf("Problem writing the config file: %v", err) + } + } +} + +func atomicWriteFile(t *testing.T, file string, bytes []byte, perm os.FileMode) error { + // In order to more closely replicate how K8s writes ConfigMaps to the file system, we will + // atomically swap out the file by writing it to a temp directory, then renaming it into the + // directory we are watching. + tempDir, cleanup := createTempDir(t) + defer cleanup() + + tempFile := fmt.Sprintf("%s/%s", tempDir, "temp") + err := ioutil.WriteFile(tempFile, bytes, perm) + if err != nil { + return err + } + return os.Rename(tempFile, file) +} diff --git a/pkg/sidecar/configmap/parse.go b/pkg/sidecar/configmap/parse.go new file mode 100644 index 00000000000..ba6da64f12c --- /dev/null +++ b/pkg/sidecar/configmap/parse.go @@ -0,0 +1,54 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package configmap + +import ( + "encoding/json" + "fmt" + + "github.com/knative/eventing/pkg/sidecar/multichannelfanout" + "go.uber.org/zap" +) + +const ( + // MultiChannelFanoutConfigKey is the key in the ConfigMap that contains all the configuration + // data. + MultiChannelFanoutConfigKey = "multiChannelFanoutConfig" +) + +// NewFanoutConfig attempts to parse the config map's data into a multichannelfanout.Config. 
+// orig == NewFanoutConfig(SerializeConfig(orig)) +func NewFanoutConfig(logger *zap.Logger, data map[string]string) (*multichannelfanout.Config, error) { + str, present := data[MultiChannelFanoutConfigKey] + if !present { + logger.Error("Expected key not found", zap.String("key", MultiChannelFanoutConfigKey)) + return nil, fmt.Errorf("expected key not found: %v", MultiChannelFanoutConfigKey) + } + return multichannelfanout.Parse(logger, str) +} + +// SerializeConfig takes in a multichannelfanout.Config and generates the ConfigMap equivalent. +// orig == NewFanoutConfig(SerializeConfig(orig)) +func SerializeConfig(config multichannelfanout.Config) (map[string]string, error) { + jb, err := json.Marshal(config) + if err != nil { + return nil, err + } + return map[string]string{ + MultiChannelFanoutConfigKey: string(jb), + }, nil +} diff --git a/pkg/sidecar/configmap/parse_test.go b/pkg/sidecar/configmap/parse_test.go new file mode 100644 index 00000000000..cee271ce090 --- /dev/null +++ b/pkg/sidecar/configmap/parse_test.go @@ -0,0 +1,213 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package configmap + +import ( + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + eventingduck "github.com/knative/eventing/pkg/apis/duck/v1alpha1" + "github.com/knative/eventing/pkg/sidecar/fanout" + "github.com/knative/eventing/pkg/sidecar/multichannelfanout" + "github.com/knative/eventing/pkg/utils" + "go.uber.org/zap" +) + +func TestNewFanoutConfig(t *testing.T) { + testCases := []struct { + name string + config string + expected *multichannelfanout.Config + expectedErr bool + }{ + { + name: "no data", + expectedErr: true, + }, + { + name: "invalid YAML", + config: ` + key: + - value + - different indent level + `, + expectedErr: true, + }, + { + name: "valid YAML -- invalid JSON", + config: "{ nil: Key }", + expectedErr: true, + }, + { + name: "unknown field", + config: "{ channelConfigs: [ { not: a-defined-field } ] }", + expectedErr: true, + }, + { + name: "valid", + config: ` + channelConfigs: + - namespace: default + name: c1 + fanoutConfig: + subscriptions: + - subscriberURI: event-changer.default.svc.` + utils.GetClusterDomainName() + ` + replyURI: message-dumper-bar.default.svc.` + utils.GetClusterDomainName() + ` + - subscriberURI: message-dumper-foo.default.svc.` + utils.GetClusterDomainName() + ` + - replyURI: message-dumper-bar.default.svc.` + utils.GetClusterDomainName() + ` + - namespace: default + name: c2 + fanoutConfig: + subscriptions: + - replyURI: message-dumper-foo.default.svc.` + utils.GetClusterDomainName() + ` + - namespace: other + name: c3 + fanoutConfig: + subscriptions: + - replyURI: message-dumper-foo.default.svc.` + utils.GetClusterDomainName(), + expected: &multichannelfanout.Config{ + ChannelConfigs: []multichannelfanout.ChannelConfig{ + { + Namespace: "default", + Name: "c1", + FanoutConfig: fanout.Config{ + Subscriptions: []eventingduck.ChannelSubscriberSpec{ + { + SubscriberURI: "event-changer.default.svc." 
+ utils.GetClusterDomainName(), + ReplyURI: "message-dumper-bar.default.svc." + utils.GetClusterDomainName(), + }, + { + SubscriberURI: "message-dumper-foo.default.svc." + utils.GetClusterDomainName(), + }, + { + ReplyURI: "message-dumper-bar.default.svc." + utils.GetClusterDomainName(), + }, + }, + }, + }, + { + Namespace: "default", + Name: "c2", + FanoutConfig: fanout.Config{ + Subscriptions: []eventingduck.ChannelSubscriberSpec{ + { + ReplyURI: "message-dumper-foo.default.svc." + utils.GetClusterDomainName(), + }, + }, + }, + }, + { + Namespace: "other", + Name: "c3", + FanoutConfig: fanout.Config{ + Subscriptions: []eventingduck.ChannelSubscriberSpec{ + { + ReplyURI: "message-dumper-foo.default.svc." + utils.GetClusterDomainName(), + }, + }, + }, + }, + }, + }, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + data := formatData(tc.config) + c, e := NewFanoutConfig(zap.NewNop(), data) + if tc.expectedErr { + if e == nil { + t.Errorf("Expected an error, actual nil") + } + return + } + if !cmp.Equal(c, tc.expected) { + t.Errorf("Unexpected config. Expected '%v'. Actual '%v'.", tc.expected, c) + } + }) + } +} + +func TestSerializeConfig(t *testing.T) { + testCases := map[string]struct { + config *multichannelfanout.Config + }{ + "empty config": { + config: &multichannelfanout.Config{}, + }, + "full config": { + config: &multichannelfanout.Config{ + ChannelConfigs: []multichannelfanout.ChannelConfig{ + { + Namespace: "default", + Name: "c1", + FanoutConfig: fanout.Config{ + Subscriptions: []eventingduck.ChannelSubscriberSpec{ + { + SubscriberURI: "foo.example.com", + ReplyURI: "bar.example.com", + }, + { + ReplyURI: "qux.example.com", + }, + { + SubscriberURI: "baz.example.com", + }, + {}, + }, + }, + }, + { + Namespace: "other", + Name: "no-subs", + FanoutConfig: fanout.Config{ + Subscriptions: []eventingduck.ChannelSubscriberSpec{}, + }, + }, + }, + }, + }, + } + + for n, tc := range testCases { + t.Run(n, func(t *testing.T) { + s, err := SerializeConfig(*tc.config) + if err != nil { + t.Errorf("Unexpected error serializing config: %v", err) + } + rt, err := NewFanoutConfig(zap.NewNop(), s) + if err != nil { + t.Errorf("Unexpected error deserializing: %v", err) + } + if diff := cmp.Diff(tc.config, rt); diff != "" { + t.Errorf("Unexpected error roundtripping the config (-want, +got): %v", diff) + } + }) + } +} + +func formatData(config string) map[string]string { + data := make(map[string]string) + if config != "" { + // Golang editors tend to replace leading spaces with tabs. YAML is left whitespace + // sensitive and disallows tabs, so let's replace the tabs with four spaces. + leftSpaceConfig := strings.Replace(config, "\t", " ", -1) + data[MultiChannelFanoutConfigKey] = leftSpaceConfig + } + return data +} diff --git a/pkg/sidecar/configmap/watcher/watcher.go b/pkg/sidecar/configmap/watcher/watcher.go new file mode 100644 index 00000000000..01dc5d7af9a --- /dev/null +++ b/pkg/sidecar/configmap/watcher/watcher.go @@ -0,0 +1,49 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package watcher + +import ( + sidecarconfigmap "github.com/knative/eventing/pkg/sidecar/configmap" + "github.com/knative/eventing/pkg/sidecar/swappable" + "github.com/knative/pkg/configmap" + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/kubernetes" + "sigs.k8s.io/controller-runtime/pkg/manager" +) + +// NewWatcher creates a new InformedWatcher that watches the specified ConfigMap and on any change +// that results in a valid multichannelfanout.Config calls configUpdated. +func NewWatcher(logger *zap.Logger, kc kubernetes.Interface, cmNamespace, cmName string, configUpdated swappable.UpdateConfig) (manager.Runnable, error) { + iw := configmap.NewInformedWatcher(kc, cmNamespace) + iw.Watch(cmName, func(cm *corev1.ConfigMap) { + config, err := sidecarconfigmap.NewFanoutConfig(logger, cm.Data) + if err != nil { + logger.Error("Could not parse ConfigMap", zap.Error(err), + zap.Any("configMap.Data", cm.Data)) + return + } + + err = configUpdated(config) + if err != nil { + logger.Error("Unable to update config", zap.Error(err)) + return + } + }) + + return iw, nil +} diff --git a/pkg/sidecar/configmap/watcher/watcher_test.go b/pkg/sidecar/configmap/watcher/watcher_test.go new file mode 100644 index 00000000000..6164c38cd63 --- /dev/null +++ b/pkg/sidecar/configmap/watcher/watcher_test.go @@ -0,0 +1,125 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package watcher + +import ( + "errors" + "testing" + + "github.com/google/go-cmp/cmp" + eventingduck "github.com/knative/eventing/pkg/apis/duck/v1alpha1" + sidecarconfigmap "github.com/knative/eventing/pkg/sidecar/configmap" + "github.com/knative/eventing/pkg/sidecar/fanout" + "github.com/knative/eventing/pkg/sidecar/multichannelfanout" + "github.com/knative/pkg/configmap" + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + namespace = "test-namespace" + name = "test-name" +) + +func TestReconcile(t *testing.T) { + testCases := map[string]struct { + config map[string]string + updateConfigErr error + expectedConfig *multichannelfanout.Config + }{ + "missing key": { + config: map[string]string{}, + expectedConfig: nil, + }, + "cannot parse cm": { + config: map[string]string{ + sidecarconfigmap.MultiChannelFanoutConfigKey: "invalid config", + }, + expectedConfig: nil, + }, + "configUpdated fails": { + config: map[string]string{ + sidecarconfigmap.MultiChannelFanoutConfigKey: "", + }, + updateConfigErr: errors.New("test-error"), + expectedConfig: &multichannelfanout.Config{}, + }, + "success": { + config: map[string]string{ + sidecarconfigmap.MultiChannelFanoutConfigKey: ` + channelConfigs: + - name: foo + namespace: bar + fanoutConfig: + subscriptions: + - subscriberURI: subscriber + replyURI: reply`, + }, + expectedConfig: &multichannelfanout.Config{ + ChannelConfigs: []multichannelfanout.ChannelConfig{ + { + Name: "foo", + Namespace: "bar", + FanoutConfig: fanout.Config{ + Subscriptions: []eventingduck.ChannelSubscriberSpec{ + { + SubscriberURI: "subscriber", + ReplyURI: "reply", + }, + }, + }, + }, + }, + }, + }, + } + for n, tc := range testCases { + t.Run(n, func(t *testing.T) { + cuc := &configUpdatedChecker{ + updateConfigErr: tc.updateConfigErr, + } + + r, err := NewWatcher(zap.NewNop(), nil, namespace, name, cuc.updateConfig) + if err != nil { + t.Errorf("Error creating watcher: %v", err) + } + iw := r.(*configmap.InformedWatcher) + iw.OnChange(&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: name, + }, + Data: tc.config, + }) + + if diff := cmp.Diff(tc.expectedConfig, cuc.config); diff != "" { + t.Errorf("Unexpected config (-want +got): %v", diff) + } + }) + } +} + +type configUpdatedChecker struct { + config *multichannelfanout.Config + updateConfigErr error +} + +func (cuc *configUpdatedChecker) updateConfig(config *multichannelfanout.Config) error { + cuc.config = config + return cuc.updateConfigErr +} diff --git a/pkg/sidecar/multichannelfanout/multi_channel_fanout_handler_test.go b/pkg/sidecar/multichannelfanout/multi_channel_fanout_handler_test.go index e6c9c30d048..32b86bdc84a 100644 --- a/pkg/sidecar/multichannelfanout/multi_channel_fanout_handler_test.go +++ b/pkg/sidecar/multichannelfanout/multi_channel_fanout_handler_test.go @@ -34,33 +34,6 @@ const ( replaceDomain = "replaceDomain" ) -func TestMakeChannelKey(t *testing.T) { - testCases := []struct { - namespace string - name string - key string - }{ - { - namespace: "default", - name: "channel", - key: "default/channel", - }, - { - namespace: "foo", - name: "bar", - key: "foo/bar", - }, - } - for _, tc := range testCases { - name := fmt.Sprintf("%s, %s -> %s", tc.namespace, tc.name, tc.key) - t.Run(name, func(t *testing.T) { - if key := makeChannelKey(tc.namespace, tc.name); key != tc.key { - t.Errorf("Unexpected ChannelKey. Expected '%v'. 
Actual '%v'", tc.key, key) - } - }) - } -} - func TestNewHandler(t *testing.T) { testCases := []struct { name string @@ -72,16 +45,14 @@ func TestNewHandler(t *testing.T) { config: Config{ ChannelConfigs: []ChannelConfig{ { - Namespace: "default", - Name: "duplicate", + HostName: "duplicatekey", }, { - Namespace: "default", - Name: "duplicate", + HostName: "duplicatekey", }, }, }, - createErr: "duplicate channel key: default/duplicate", + createErr: "duplicate channel key: duplicatekey", }, } @@ -241,8 +212,9 @@ func TestServeHTTP(t *testing.T) { config: Config{ ChannelConfigs: []ChannelConfig{ { - Namespace: "default", - Name: "first-channel", + Namespace: "ns", + Name: "name", + HostName: "first-channel.default", FanoutConfig: fanout.Config{ Subscriptions: []eventingduck.ChannelSubscriberSpec{ { @@ -261,8 +233,10 @@ func TestServeHTTP(t *testing.T) { config: Config{ ChannelConfigs: []ChannelConfig{ { - Namespace: "default", - Name: "first-channel", + + Namespace: "ns", + Name: "name", + HostName: "first-channel.default", FanoutConfig: fanout.Config{ Subscriptions: []eventingduck.ChannelSubscriberSpec{ { @@ -274,6 +248,7 @@ func TestServeHTTP(t *testing.T) { { Namespace: "default", Name: "second-channel", + HostName: "second-channel.default", FanoutConfig: fanout.Config{ Subscriptions: []eventingduck.ChannelSubscriberSpec{ { @@ -303,7 +278,7 @@ func TestServeHTTP(t *testing.T) { h, err := NewHandler(zap.NewNop(), tc.config) if err != nil { - t.Errorf("Unexpected NewHandler error: '%v'", err) + t.Fatalf("Unexpected NewHandler error: '%v'", err) } r := requestWithChannelKey(tc.key) diff --git a/pkg/sidecar/swappable/swappable_test.go b/pkg/sidecar/swappable/swappable_test.go index 7ee97d00955..b4cc0daa872 100644 --- a/pkg/sidecar/swappable/swappable_test.go +++ b/pkg/sidecar/swappable/swappable_test.go @@ -30,9 +30,8 @@ import ( ) const ( - namespace = "default" - name = "channel1" replaceDomain = "replaceDomain" + hostName = "a.b.c.d" ) func TestHandler(t *testing.T) { @@ -44,8 +43,7 @@ func TestHandler(t *testing.T) { { ChannelConfigs: []multichannelfanout.ChannelConfig{ { - Namespace: namespace, - Name: name, + HostName: hostName, FanoutConfig: fanout.Config{ Subscriptions: []eventingduck.ChannelSubscriberSpec{ { @@ -59,8 +57,7 @@ func TestHandler(t *testing.T) { { ChannelConfigs: []multichannelfanout.ChannelConfig{ { - Namespace: namespace, - Name: name, + HostName: hostName, FanoutConfig: fanout.Config{ Subscriptions: []eventingduck.ChannelSubscriberSpec{ { @@ -96,8 +93,7 @@ func TestHandler_InvalidConfigChange(t *testing.T) { initialConfig: multichannelfanout.Config{ ChannelConfigs: []multichannelfanout.ChannelConfig{ { - Namespace: namespace, - Name: name, + HostName: hostName, FanoutConfig: fanout.Config{ Subscriptions: []eventingduck.ChannelSubscriberSpec{ { @@ -112,12 +108,10 @@ func TestHandler_InvalidConfigChange(t *testing.T) { // Duplicate (namespace, name). ChannelConfigs: []multichannelfanout.ChannelConfig{ { - Namespace: namespace, - Name: name, + HostName: hostName, }, { - Namespace: namespace, - Name: name, + HostName: hostName, }, }, }, @@ -183,7 +177,7 @@ func updateConfigAndTest(t *testing.T, h *Handler, config multichannelfanout.Con func assertRequestAccepted(t *testing.T, h *Handler) { w := httptest.NewRecorder() - h.ServeHTTP(w, makeRequest(namespace, name)) + h.ServeHTTP(w, makeRequest(hostName)) if w.Code != http.StatusAccepted { t.Errorf("Unexpected response code. Expected 202. 
Actual %v", w.Code) } @@ -196,8 +190,8 @@ func (*successHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { _ = r.Body.Close() } -func makeRequest(namespace, name string) *http.Request { - r := httptest.NewRequest("POST", fmt.Sprintf("http://%s.%s/", name, namespace), strings.NewReader("")) +func makeRequest(hostName string) *http.Request { + r := httptest.NewRequest("POST", fmt.Sprintf("http://%s/", hostName), strings.NewReader("")) return r } From bd7ae6832f033bea79a57224b6a004e4d74e5d52 Mon Sep 17 00:00:00 2001 From: Akash Verenkar Date: Tue, 9 Apr 2019 17:28:41 -0700 Subject: [PATCH 04/37] UTs pass, E2E tests pass with in-memory as well as kafka --- cmd/broker/ingress/main.go | 17 ----------------- cmd/fanoutsidecar/main.go | 3 +-- .../{channelwatcher.go => channel_watcher.go} | 0 pkg/provisioners/channel_util.go | 12 +++++++----- .../clusterchannelprovisioner/reconcile.go | 2 +- pkg/provisioners/provisioner_util.go | 5 ++++- .../v1alpha1/broker/resources/ingress.go | 7 ++++--- .../multi_channel_fanout_handler.go | 6 +++--- pkg/sidecar/swappable/swappable.go | 4 ---- 9 files changed, 20 insertions(+), 36 deletions(-) rename pkg/channelwatcher/{channelwatcher.go => channel_watcher.go} (100%) diff --git a/cmd/broker/ingress/main.go b/cmd/broker/ingress/main.go index efafda14aae..ea0094fba73 100644 --- a/cmd/broker/ingress/main.go +++ b/cmd/broker/ingress/main.go @@ -17,12 +17,10 @@ package main import ( - "bytes" "context" "errors" "flag" "fmt" - "io/ioutil" "log" "net/http" "net/url" @@ -223,20 +221,6 @@ func (h *handler) serveHTTP(ctx context.Context, event cloudevents.Event, resp * } func (h *handler) sendEvent(ctx context.Context, tctx cehttp.TransportContext, event cloudevents.Event) error { - - //url := "http://external-service.knative-eventing.svc.cluster.local" - resp, err1 := http.Post(h.channelURI.String(), "application/json", bytes.NewBuffer([]byte{})) - if err1 != nil { - log.Println("Error:", err1) - } - body, err1 := ioutil.ReadAll(resp.Body) - if err1 != nil { - log.Fatalln(err1) - } - log.Println(fmt.Sprintf("Reponse: %+v", resp)) - log.Println(fmt.Sprintf("ReponseBody from server: %v", string(body))) - - fmt.Println("ChannelURI: ", h.channelURI) sendingCTX := broker.SendingContext(ctx, tctx, h.channelURI) startTS := time.Now() @@ -248,7 +232,6 @@ func (h *handler) sendEvent(ctx context.Context, tctx cehttp.TransportContext, e _, err := h.ceClient.Send(sendingCTX, event) if err != nil { sendingCTX, _ = tag.New(sendingCTX, tag.Insert(TagResult, "error")) - fmt.Println("Error: ", err) } else { sendingCTX, _ = tag.New(sendingCTX, tag.Insert(TagResult, "ok")) } diff --git a/cmd/fanoutsidecar/main.go b/cmd/fanoutsidecar/main.go index 9787fdfaecb..6392dd91e57 100644 --- a/cmd/fanoutsidecar/main.go +++ b/cmd/fanoutsidecar/main.go @@ -40,9 +40,8 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/config" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/runtime/signals" - // uncomment this line to debug in GKE from local machine - _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + //_ "k8s.io/client-go/plugin/pkg/client/auth/gcp" ) var ( diff --git a/pkg/channelwatcher/channelwatcher.go b/pkg/channelwatcher/channel_watcher.go similarity index 100% rename from pkg/channelwatcher/channelwatcher.go rename to pkg/channelwatcher/channel_watcher.go diff --git a/pkg/provisioners/channel_util.go b/pkg/provisioners/channel_util.go index d52581c8116..1fc8026dd27 100644 --- a/pkg/provisioners/channel_util.go +++ b/pkg/provisioners/channel_util.go @@ 
-123,12 +123,14 @@ func createK8sService(ctx context.Context, client runtimeClient.Client, getSvc g } else if err != nil { return nil, err } - // spec.clusterIP is immutable and is set on existing services. If we don't set this // to the same value, we will encounter an error while updating. svc.Spec.ClusterIP = current.Spec.ClusterIP if !equality.Semantic.DeepDerivative(svc.Spec, current.Spec) || - !expectedLabelsPresent(current.ObjectMeta.Labels, svc.ObjectMeta.Labels) { + !expectedLabelsPresent(current.ObjectMeta.Labels, svc.ObjectMeta.Labels) || + // This DeepEqual is necessary to force update dispatcher services when upgrading from 0.5 to 0.6. + // Above DeepDerivative will not work because we have removed an optional field (name) from ports + !equality.Semantic.DeepEqual(svc.Spec.Ports, current.Spec.Ports) { current.Spec = svc.Spec current.ObjectMeta.Labels = addExpectedLabels(current.ObjectMeta.Labels, svc.ObjectMeta.Labels) err = client.Update(ctx, current) @@ -265,7 +267,6 @@ func UpdateChannel(ctx context.Context, client runtimeClient.Client, u *eventing // OwnerReferences on the resource so handleObject can discover the Channel resource that 'owns' it. // As well as being garbage collected when the Channel is deleted. func newK8sService(c *eventingv1alpha1.Channel, opts ...k8sServiceOption) (*corev1.Service, error) { - // TODO: Need to check if generated name truncates the channel name in case channel name is tool long // Add annotations svc := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ @@ -283,8 +284,9 @@ func newK8sService(c *eventingv1alpha1.Channel, opts ...k8sServiceOption) (*core Spec: corev1.ServiceSpec{ Ports: []corev1.ServicePort{ { - Name: PortName, - Port: PortNumber, + Name: PortName, + Protocol: corev1.ProtocolTCP, + Port: PortNumber, }, }, }, diff --git a/pkg/provisioners/inmemory/clusterchannelprovisioner/reconcile.go b/pkg/provisioners/inmemory/clusterchannelprovisioner/reconcile.go index 5794b2dbafc..678c544d46a 100644 --- a/pkg/provisioners/inmemory/clusterchannelprovisioner/reconcile.go +++ b/pkg/provisioners/inmemory/clusterchannelprovisioner/reconcile.go @@ -180,7 +180,7 @@ func (r *reconciler) reconcile(ctx context.Context, ccp *eventingv1alpha1.Cluste return nil } -// Since there are two provisioners "in-memry" and "in-memory-channel" but one single dispatcher service deployment, +// Since there are two provisioners "in-memory" and "in-memory-channel" but one single dispatcher service deployment, // update the label of the K8s service to always point at the same dispatcher service deployment func setDispatcherServiceSelector() util.ServiceOption { return func(svc *v1.Service) error { diff --git a/pkg/provisioners/provisioner_util.go b/pkg/provisioners/provisioner_util.go index 7003250345c..a65551fcc3f 100644 --- a/pkg/provisioners/provisioner_util.go +++ b/pkg/provisioners/provisioner_util.go @@ -78,8 +78,11 @@ func newDispatcherService(ccp *eventingv1alpha1.ClusterChannelProvisioner, opts Selector: labels, Ports: []corev1.ServicePort{ { - Name: "http", + // There is a bug in Istio where named port doesn't work when connecting using an ExternalName service + // Refer to https://github.com/istio/istio/issues/13193 for more details. 
+ // TODO: Revert this when ISTIO fixes the issue Port: 80, + Protocol: corev1.ProtocolTCP, TargetPort: intstr.FromInt(8080), }, }, diff --git a/pkg/reconciler/v1alpha1/broker/resources/ingress.go b/pkg/reconciler/v1alpha1/broker/resources/ingress.go index f4eb40cd85c..8df5a57f841 100644 --- a/pkg/reconciler/v1alpha1/broker/resources/ingress.go +++ b/pkg/reconciler/v1alpha1/broker/resources/ingress.go @@ -58,9 +58,10 @@ func MakeIngress(args *IngressArgs) *appsv1.Deployment { Template: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Labels: ingressLabels(args.Broker), - // Annotations: map[string]string{ - // "sidecar.istio.io/inject": "true", - // }, + // TODO: Remove this annotation once all channels stop using istio virtual service + Annotations: map[string]string{ + "sidecar.istio.io/inject": "true", + }, }, Spec: corev1.PodSpec{ ServiceAccountName: args.ServiceAccountName, diff --git a/pkg/sidecar/multichannelfanout/multi_channel_fanout_handler.go b/pkg/sidecar/multichannelfanout/multi_channel_fanout_handler.go index a2f24cbc6d8..282a1c0985d 100644 --- a/pkg/sidecar/multichannelfanout/multi_channel_fanout_handler.go +++ b/pkg/sidecar/multichannelfanout/multi_channel_fanout_handler.go @@ -42,9 +42,9 @@ type Config struct { // ChannelConfig is the configuration for a single Channel. type ChannelConfig struct { - Namespace string `json:"namespace"` - Name string `json:"name"` - HostName string + Namespace string `json:"namespace"` + Name string `json:"name"` + HostName string `json:"hostname"` FanoutConfig fanout.Config `json:"fanoutConfig"` } diff --git a/pkg/sidecar/swappable/swappable.go b/pkg/sidecar/swappable/swappable.go index 3cff72630df..70de3edab2c 100644 --- a/pkg/sidecar/swappable/swappable.go +++ b/pkg/sidecar/swappable/swappable.go @@ -24,7 +24,6 @@ package swappable import ( "errors" - "fmt" "net/http" "sync" "sync/atomic" @@ -103,9 +102,6 @@ func (h *Handler) UpdateConfig(config *multichannelfanout.Config) error { // ServeHTTP delegates all HTTP requests to the current multichannelfanout.Handler. func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - // TODO: delete this debugging code - fmt.Sprintf("Request: %+v", r) - // Hand work off to the current multi channel fanout handler. 
h.logger.Debug("ServeHTTP request received") h.getMultiChannelFanoutHandler().ServeHTTP(w, r) From df4487f10c86dd6b35e5ab47a516aa039db8829d Mon Sep 17 00:00:00 2001 From: akashrv Date: Wed, 10 Apr 2019 06:51:24 -0700 Subject: [PATCH 05/37] fixed uts that failed due to last K8s service change --- .../controller/clusterchannelprovisioner/reconcile_test.go | 2 +- pkg/provisioners/channel_util_test.go | 5 +++-- .../inmemory/clusterchannelprovisioner/reconcile_test.go | 2 +- pkg/provisioners/provisioner_util_test.go | 2 +- 4 files changed, 6 insertions(+), 5 deletions(-) diff --git a/contrib/natss/pkg/controller/clusterchannelprovisioner/reconcile_test.go b/contrib/natss/pkg/controller/clusterchannelprovisioner/reconcile_test.go index e2f043d9231..57a70ade635 100644 --- a/contrib/natss/pkg/controller/clusterchannelprovisioner/reconcile_test.go +++ b/contrib/natss/pkg/controller/clusterchannelprovisioner/reconcile_test.go @@ -254,7 +254,7 @@ func makeK8sService() *corev1.Service { Selector: provisioners.DispatcherLabels(Name), Ports: []corev1.ServicePort{ { - Name: "http", + Protocol: corev1.ProtocolTCP, Port: 80, TargetPort: intstr.FromInt(8080), }, diff --git a/pkg/provisioners/channel_util_test.go b/pkg/provisioners/channel_util_test.go index 6aded3735d3..4f89ffb7b56 100644 --- a/pkg/provisioners/channel_util_test.go +++ b/pkg/provisioners/channel_util_test.go @@ -597,8 +597,9 @@ func makeK8sService() *corev1.Service { Spec: corev1.ServiceSpec{ Ports: []corev1.ServicePort{ { - Name: PortName, - Port: PortNumber, + Name: PortName, + Port: PortNumber, + Protocol: corev1.ProtocolTCP, }, }, }, diff --git a/pkg/provisioners/inmemory/clusterchannelprovisioner/reconcile_test.go b/pkg/provisioners/inmemory/clusterchannelprovisioner/reconcile_test.go index 036e1424235..9d8934d0c61 100644 --- a/pkg/provisioners/inmemory/clusterchannelprovisioner/reconcile_test.go +++ b/pkg/provisioners/inmemory/clusterchannelprovisioner/reconcile_test.go @@ -386,9 +386,9 @@ func makeK8sService() *corev1.Service { Selector: util.DispatcherLabels(inMemoryChannelName), Ports: []corev1.ServicePort{ { - Name: "http", Port: 80, TargetPort: intstr.FromInt(8080), + Protocol: corev1.ProtocolTCP, }, }, }, diff --git a/pkg/provisioners/provisioner_util_test.go b/pkg/provisioners/provisioner_util_test.go index cdf2eb724e6..fcd6a9dafe1 100644 --- a/pkg/provisioners/provisioner_util_test.go +++ b/pkg/provisioners/provisioner_util_test.go @@ -180,9 +180,9 @@ func makeDispatcherService() *corev1.Service { Selector: DispatcherLabels(clusterChannelProvisionerName), Ports: []corev1.ServicePort{ { - Name: "http", Port: 80, TargetPort: intstr.FromInt(8080), + Protocol: corev1.ProtocolTCP, }, }, }, From 23ae8b4457032e08f4f5cb5422ce5135f1b46fb2 Mon Sep 17 00:00:00 2001 From: Akash Verenkar Date: Wed, 10 Apr 2019 16:14:55 -0700 Subject: [PATCH 06/37] Removed unnecessary space from a line --- config/provisioners/in-memory-channel/in-memory-channel.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config/provisioners/in-memory-channel/in-memory-channel.yaml b/config/provisioners/in-memory-channel/in-memory-channel.yaml index 3f466ff5e41..d1b30298273 100644 --- a/config/provisioners/in-memory-channel/in-memory-channel.yaml +++ b/config/provisioners/in-memory-channel/in-memory-channel.yaml @@ -188,7 +188,7 @@ spec: role: dispatcher template: metadata: - annotations: + annotations: sidecar.istio.io/inject: "true" labels: *labels spec: From bb7ab3e84fbbee2e7b6144e5df9d199512840e04 Mon Sep 17 00:00:00 2001 From: Akash Verenkar 
Date: Wed, 10 Apr 2019 16:19:26 -0700
Subject: [PATCH 07/37] Adding istio annotation to test POD. This will be
 needed when running E2E tests against channels other than in-memory

---
 test/crd.go | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/test/crd.go b/test/crd.go
index 139b33de079..dd29cd53534 100644
--- a/test/crd.go
+++ b/test/crd.go
@@ -74,6 +74,9 @@ func Configuration(name string, namespace string, imagePath string) *servingv1al
 
 // ClusterChannelProvisioner returns a ClusterChannelProvisioner for a given name
 func ClusterChannelProvisioner(name string) *corev1.ObjectReference {
+	if name == "" {
+		return nil
+	}
 	return pkgTest.CoreV1ObjectReference("ClusterChannelProvisioner", eventsApiVersion, name)
 }
 
@@ -164,8 +167,9 @@ func EventSenderPod(name string, namespace string, sink string, event CloudEvent
 
 	return &corev1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
-			Name:      name,
-			Namespace: namespace,
+			Name:        name,
+			Namespace:   namespace,
+			Annotations: map[string]string{"sidecar.istio.io/inject": "true"},
 		},
 		Spec: corev1.PodSpec{
 			Containers: []corev1.Container{{
From c646dcd49ab85272d84a605abfb1dd124e24a832 Mon Sep 17 00:00:00 2001
From: Akash Verenkar
Date: Thu, 11 Apr 2019 10:36:16 -0700
Subject: [PATCH 08/37] Bug fix to set clusterIp of K8s service only when it is
 not of type ExternalName

---
 pkg/provisioners/channel_util.go                    | 6 ++++--
 pkg/provisioners/inmemory/channel/reconcile_test.go | 2 +-
 2 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/pkg/provisioners/channel_util.go b/pkg/provisioners/channel_util.go
index 1fc8026dd27..3ca480a6582 100644
--- a/pkg/provisioners/channel_util.go
+++ b/pkg/provisioners/channel_util.go
@@ -69,7 +69,7 @@ type k8sServiceOption func(*corev1.Service) error
 func ExternalService(c *eventingv1alpha1.Channel) k8sServiceOption {
 	return func(svc *corev1.Service) error {
 		svc.Spec = corev1.ServiceSpec{
-			Type:         "ExternalName",
+			Type:         corev1.ServiceTypeExternalName,
 			ExternalName: names.ServiceHostName(channelDispatcherServiceName(c.Spec.Provisioner.Name), system.Namespace()),
 		}
 		return nil
@@ -125,7 +125,9 @@ func createK8sService(ctx context.Context, client runtimeClient.Client, getSvc g
 	}
 	// spec.clusterIP is immutable and is set on existing services. If we don't set this
 	// to the same value, we will encounter an error while updating.
-	svc.Spec.ClusterIP = current.Spec.ClusterIP
+	if svc.Spec.Type != corev1.ServiceTypeExternalName {
+		svc.Spec.ClusterIP = current.Spec.ClusterIP
+	}
 	if !equality.Semantic.DeepDerivative(svc.Spec, current.Spec) ||
 		!expectedLabelsPresent(current.ObjectMeta.Labels, svc.ObjectMeta.Labels) ||
 		// This DeepEqual is necessary to force update dispatcher services when upgrading from 0.5 to 0.6.
diff --git a/pkg/provisioners/inmemory/channel/reconcile_test.go b/pkg/provisioners/inmemory/channel/reconcile_test.go index 211f3fc5a03..76aa9e06f95 100644 --- a/pkg/provisioners/inmemory/channel/reconcile_test.go +++ b/pkg/provisioners/inmemory/channel/reconcile_test.go @@ -482,7 +482,7 @@ func makeK8sService(pn ...string) *corev1.Service { }, Spec: corev1.ServiceSpec{ ExternalName: names.ServiceHostName(fmt.Sprintf("%s-dispatcher", getProvisionerName(pn)), system.Namespace()), - Type: "ExternalName", + Type: corev1.ServiceTypeExternalName, }, } } From 485f6b3b93c88099c431a578e1e54c5b4901db85 Mon Sep 17 00:00:00 2001 From: akashrv Date: Thu, 11 Apr 2019 10:40:08 -0700 Subject: [PATCH 09/37] WIP kafka channel --- cmd/fanoutsidecar/main.go | 23 +-- contrib/kafka/cmd/dispatcher/main.go | 68 +++++---- contrib/kafka/config/kafka.yaml | 9 +- .../kafka/pkg/controller/channel/provider.go | 24 +-- .../kafka/pkg/controller/channel/reconcile.go | 140 +----------------- .../pkg/controller/channel/reconcile_test.go | 70 ++++----- pkg/provisioners/channel_util.go | 3 +- .../v1alpha1/broker/resources/ingress.go | 4 - pkg/sidecar/multichannelfanout/config.go | 67 +++++++++ .../multi_channel_fanout_handler.go | 14 -- 10 files changed, 156 insertions(+), 266 deletions(-) create mode 100644 pkg/sidecar/multichannelfanout/config.go diff --git a/cmd/fanoutsidecar/main.go b/cmd/fanoutsidecar/main.go index 6392dd91e57..ed2fe9a2d6d 100644 --- a/cmd/fanoutsidecar/main.go +++ b/cmd/fanoutsidecar/main.go @@ -30,7 +30,6 @@ import ( "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" "github.com/knative/eventing/pkg/channelwatcher" "github.com/knative/eventing/pkg/logging" - "github.com/knative/eventing/pkg/sidecar/fanout" "github.com/knative/eventing/pkg/sidecar/multichannelfanout" "github.com/knative/eventing/pkg/sidecar/swappable" "go.uber.org/zap" @@ -145,7 +144,7 @@ func updateChannelConfig(updateConfig swappable.UpdateConfig) channelwatcher.Wat logging.FromContext(ctx).Info("Unable to list channels", zap.Error(err)) return err } - config := multiChannelFanoutConfig(channels) + config := multichannelfanout.NewConfigFromChannels(channels) return updateConfig(config) } } @@ -175,26 +174,6 @@ func shouldWatch(ch *v1alpha1.Channel) bool { return false } -func multiChannelFanoutConfig(channels []v1alpha1.Channel) *multichannelfanout.Config { - cc := make([]multichannelfanout.ChannelConfig, 0) - for _, c := range channels { - channelConfig := multichannelfanout.ChannelConfig{ - Namespace: c.Namespace, - Name: c.Name, - HostName: c.Status.Address.Hostname, - } - if c.Spec.Subscribable != nil { - channelConfig.FanoutConfig = fanout.Config{ - Subscriptions: c.Spec.Subscribable.Subscribers, - } - } - cc = append(cc, channelConfig) - } - return &multichannelfanout.Config{ - ChannelConfigs: cc, - } -} - // runnableServer is a small wrapper around http.Server so that it matches the manager.Runnable // interface. type runnableServer struct { diff --git a/contrib/kafka/cmd/dispatcher/main.go b/contrib/kafka/cmd/dispatcher/main.go index 9ef18689623..6748e52c088 100644 --- a/contrib/kafka/cmd/dispatcher/main.go +++ b/contrib/kafka/cmd/dispatcher/main.go @@ -17,33 +17,27 @@ limitations under the License. 
package main import ( + "context" "flag" - "fmt" "log" - "os" + "github.com/knative/eventing/contrib/kafka/pkg/controller" provisionerController "github.com/knative/eventing/contrib/kafka/pkg/controller" "github.com/knative/eventing/contrib/kafka/pkg/dispatcher" - "github.com/knative/eventing/pkg/sidecar/configmap/watcher" - "github.com/knative/eventing/pkg/utils" + "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" + "github.com/knative/eventing/pkg/channelwatcher" + "github.com/knative/eventing/pkg/logging" + "github.com/knative/eventing/pkg/sidecar/multichannelfanout" + "github.com/knative/eventing/pkg/sidecar/swappable" "github.com/knative/pkg/signals" - "github.com/knative/pkg/system" "go.uber.org/zap" - "k8s.io/client-go/kubernetes" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/config" "sigs.k8s.io/controller-runtime/pkg/manager" ) func main() { - configMapName := os.Getenv("DISPATCHER_CONFIGMAP_NAME") - if configMapName == "" { - configMapName = provisionerController.DispatcherConfigMapName - } - configMapNamespace := os.Getenv("DISPATCHER_CONFIGMAP_NAMESPACE") - if configMapNamespace == "" { - configMapNamespace = system.Namespace() - } - flag.Parse() logger, err := zap.NewProduction() if err != nil { @@ -68,17 +62,10 @@ func main() { logger.Fatal("Unable to add kafkaDispatcher", zap.Error(err)) } - kc, err := kubernetes.NewForConfig(mgr.GetConfig()) - if err != nil { - logger.Fatal("unable to create kubernetes client.", zap.Error(err)) - } - - cmw, err := watcher.NewWatcher(logger, kc, configMapNamespace, configMapName, kafkaDispatcher.UpdateConfig) + v1alpha1.AddToScheme(mgr.GetScheme()) + channelwatcher.New(mgr, logger, updateChannelConfig(kafkaDispatcher.UpdateConfig)) if err != nil { - logger.Fatal("unable to create configMap watcher", zap.String("configMap", fmt.Sprintf("%s/%s", configMapNamespace, configMapName))) - } - if err = mgr.Add(utils.NewBlockingStart(logger, cmw)); err != nil { - logger.Fatal("Unable to add the configMap watcher to the manager", zap.Error(err)) + logger.Fatal("Unable to create channel watcher.", zap.Error(err)) } // set up signals so we handle the first shutdown signal gracefully @@ -89,3 +76,34 @@ func main() { } logger.Info("Exiting...") } +func updateChannelConfig(updateConfig swappable.UpdateConfig) channelwatcher.WatchHandlerFunc { + return func(ctx context.Context, c client.Client, chanNamespacedName types.NamespacedName) error { + channels, err := listAllChannels(ctx, c) + if err != nil { + logging.FromContext(ctx).Info("Unable to list channels", zap.Error(err)) + return err + } + config := multichannelfanout.NewConfigFromChannels(channels) + return updateConfig(config) + } +} + +func listAllChannels(ctx context.Context, c client.Client) ([]v1alpha1.Channel, error) { + channels := make([]v1alpha1.Channel, 0) + cl := &v1alpha1.ChannelList{} + if err := c.List(ctx, &client.ListOptions{}, cl); err != nil { + return nil, err + } + for _, c := range cl.Items { + if c.Status.IsReady() && shouldWatch(&c) { + channels = append(channels, c) + } + } + return channels, nil +} + +func shouldWatch(ch *v1alpha1.Channel) bool { + return ch.Spec.Provisioner != nil && + ch.Spec.Provisioner.Namespace == "" && + ch.Spec.Provisioner.Name == controller.Name +} diff --git a/contrib/kafka/config/kafka.yaml b/contrib/kafka/config/kafka.yaml index dc28a8636da..d0b25649e00 100644 --- a/contrib/kafka/config/kafka.yaml +++ b/contrib/kafka/config/kafka.yaml @@ -108,7 +108,7 @@ metadata: 
namespace: knative-eventing data: # Broker URL's for the provisioner. Replace this with the URL's for your kafka cluster. - bootstrap_servers: kafkabroker.kafka:9092 + bootstrap_servers: my-cluster-kafka-bootstrap.kafka:9092 # Consumer mode to dispatch events from different partitions in parallel. # By default(multiplex), partitions are multiplexed with a single go channel. @@ -211,13 +211,6 @@ spec: containers: - name: dispatcher image: github.com/knative/eventing/contrib/kafka/cmd/dispatcher - env: - - name: DISPATCHER_CONFIGMAP_NAME - value: kafka-channel-dispatcher - - name: DISPATCHER_CONFIGMAP_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace volumeMounts: - name: kafka-channel-controller-config mountPath: /etc/config-provisioner diff --git a/contrib/kafka/pkg/controller/channel/provider.go b/contrib/kafka/pkg/controller/channel/provider.go index 7c9d413d246..73eab2e8d22 100644 --- a/contrib/kafka/pkg/controller/channel/provider.go +++ b/contrib/kafka/pkg/controller/channel/provider.go @@ -18,7 +18,6 @@ package channel import ( "github.com/Shopify/sarama" - istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3" "go.uber.org/zap" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" @@ -49,11 +48,10 @@ var ( ) type reconciler struct { - client client.Client - recorder record.EventRecorder - logger *zap.Logger - config *common.KafkaProvisionerConfig - configMapKey client.ObjectKey + client client.Client + recorder record.EventRecorder + logger *zap.Logger + config *common.KafkaProvisionerConfig // Using a shared kafkaClusterAdmin does not work currently because of an issue with // Shopify/sarama, see https://github.com/Shopify/sarama/issues/1162. kafkaClusterAdmin sarama.ClusterAdmin @@ -67,10 +65,9 @@ func ProvideController(mgr manager.Manager, config *common.KafkaProvisionerConfi // Setup a new controller to Reconcile Channel. c, err := controller.New(controllerAgentName, mgr, controller.Options{ Reconciler: &reconciler{ - recorder: mgr.GetRecorder(controllerAgentName), - logger: logger, - config: config, - configMapKey: defaultConfigMapKey, + recorder: mgr.GetRecorder(controllerAgentName), + logger: logger, + config: config, }, }) if err != nil { @@ -89,13 +86,6 @@ func ProvideController(mgr manager.Manager, config *common.KafkaProvisionerConfi return nil, err } - // Watch the VirtualServices that are owned by Channels. 
- err = c.Watch(&source.Kind{Type: &istiov1alpha3.VirtualService{}}, &handler.EnqueueRequestForOwner{OwnerType: &eventingv1alpha1.Channel{}, IsController: true}) - if err != nil { - logger.Error("unable to watch VirtualServices.", zap.Error(err)) - return nil, err - } - return c, nil } diff --git a/contrib/kafka/pkg/controller/channel/reconcile.go b/contrib/kafka/pkg/controller/channel/reconcile.go index cf64955bc43..34a7e1c9b71 100644 --- a/contrib/kafka/pkg/controller/channel/reconcile.go +++ b/contrib/kafka/pkg/controller/channel/reconcile.go @@ -23,10 +23,8 @@ import ( "github.com/Shopify/sarama" "go.uber.org/zap" - corev1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -35,10 +33,6 @@ import ( util "github.com/knative/eventing/pkg/provisioners" topicUtils "github.com/knative/eventing/pkg/provisioners/utils" eventingNames "github.com/knative/eventing/pkg/reconciler/names" - "github.com/knative/eventing/pkg/sidecar/configmap" - "github.com/knative/eventing/pkg/sidecar/fanout" - "github.com/knative/eventing/pkg/sidecar/multichannelfanout" - "k8s.io/apimachinery/pkg/api/equality" ) const ( @@ -97,30 +91,28 @@ func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, err return reconcile.Result{}, nil } - newChannel := channel.DeepCopy() - - newChannel.Status.InitializeConditions() + channel.Status.InitializeConditions() var requeue = false if clusterChannelProvisioner.Status.IsReady() { // Reconcile this copy of the Channel and then write back any status // updates regardless of whether the reconcile error out. - requeue, err = r.reconcile(ctx, newChannel) + requeue, err = r.reconcile(ctx, channel) } else { - newChannel.Status.MarkNotProvisioned("NotProvisioned", "ClusterChannelProvisioner %s is not ready", clusterChannelProvisioner.Name) + channel.Status.MarkNotProvisioned("NotProvisioned", "ClusterChannelProvisioner %s is not ready", clusterChannelProvisioner.Name) err = fmt.Errorf("ClusterChannelProvisioner %s is not ready", clusterChannelProvisioner.Name) } if err != nil { r.logger.Error("Dispatcher reconciliation failed", zap.Error(err)) - r.recorder.Eventf(newChannel, v1.EventTypeWarning, dispatcherReconcileFailed, "Dispatcher reconciliation failed: %v", err) + r.recorder.Eventf(channel, v1.EventTypeWarning, dispatcherReconcileFailed, "Dispatcher reconciliation failed: %v", err) } else { r.logger.Debug("Channel reconciled") } - if updateChannelErr := util.UpdateChannel(ctx, r.client, newChannel); updateChannelErr != nil { + if updateChannelErr := util.UpdateChannel(ctx, r.client, channel); updateChannelErr != nil { r.logger.Info("failed to update channel status", zap.Error(updateChannelErr)) - r.recorder.Eventf(newChannel, v1.EventTypeWarning, dispatcherUpdateStatusFailed, "Failed to update Channel's dispatcher status: %v", err) + r.recorder.Eventf(channel, v1.EventTypeWarning, dispatcherUpdateStatusFailed, "Failed to update Channel's dispatcher status: %v", err) return reconcile.Result{}, updateChannelErr } @@ -134,13 +126,6 @@ func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, err // boolean indicates if this Channel should be immediately requeued for another reconcile loop. The // returned error indicates an error during reconciliation. 
func (r *reconciler) reconcile(ctx context.Context, channel *eventingv1alpha1.Channel) (bool, error) { - - // We always need to sync the Channel config, so do it first. - if err := r.syncChannelConfig(ctx); err != nil { - r.logger.Info("error updating syncing the Channel config", zap.Error(err)) - return false, err - } - // We don't currently initialize r.kafkaClusterAdmin, hence we end up creating the cluster admin client every time. // This is because of an issue with Shopify/sarama. See https://github.com/Shopify/sarama/issues/1162. // Once the issue is fixed we should use a shared cluster admin client. Also, r.kafkaClusterAdmin is currently @@ -177,19 +162,12 @@ func (r *reconciler) reconcile(ctx context.Context, channel *eventingv1alpha1.Ch return false, err } - svc, err := util.CreateK8sService(ctx, r.client, channel) + svc, err := util.CreateK8sService(ctx, r.client, channel, util.ExternalService(channel)) if err != nil { r.logger.Info("error creating the Channel's K8s Service", zap.Error(err)) return false, err } channel.Status.SetAddress(eventingNames.ServiceHostName(svc.Name, svc.Namespace)) - - _, err = util.CreateVirtualService(ctx, r.client, channel, svc) - if err != nil { - r.logger.Info("error creating the Virtual Service for the Channel", zap.Error(err)) - return false, err - } - channel.Status.MarkProvisioned() // close the connection @@ -268,110 +246,6 @@ func (r *reconciler) getClusterChannelProvisioner() (*eventingv1alpha1.ClusterCh return clusterChannelProvisioner, nil } -func (r *reconciler) syncChannelConfig(ctx context.Context) error { - channels, err := r.listAllChannels(ctx) - if err != nil { - r.logger.Info("Unable to list channels", zap.Error(err)) - return err - } - config := multiChannelFanoutConfig(channels) - return r.writeConfigMap(ctx, config) -} - -func (r *reconciler) writeConfigMap(ctx context.Context, config *multichannelfanout.Config) error { - logger := r.logger.With(zap.Any("configMap", r.configMapKey)) - - updated, err := configmap.SerializeConfig(*config) - if err != nil { - r.logger.Error("Unable to serialize config", zap.Error(err), zap.Any("config", config)) - return err - } - - cm := &corev1.ConfigMap{} - err = r.client.Get(ctx, r.configMapKey, cm) - if errors.IsNotFound(err) { - cm = r.createNewConfigMap(updated) - err = r.client.Create(ctx, cm) - if err != nil { - logger.Info("Unable to create ConfigMap", zap.Error(err)) - return err - } - } - if err != nil { - logger.Info("Unable to get ConfigMap", zap.Error(err)) - return err - } - - if equality.Semantic.DeepEqual(cm.Data, updated) { - // Nothing to update. 
- return nil - } - - cm.Data = updated - return r.client.Update(ctx, cm) -} - -func (r *reconciler) createNewConfigMap(data map[string]string) *corev1.ConfigMap { - return &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: r.configMapKey.Namespace, - Name: r.configMapKey.Name, - }, - Data: data, - } -} - -func multiChannelFanoutConfig(channels []eventingv1alpha1.Channel) *multichannelfanout.Config { - cc := make([]multichannelfanout.ChannelConfig, 0) - for _, c := range channels { - channelConfig := multichannelfanout.ChannelConfig{ - Namespace: c.Namespace, - Name: c.Name, - } - if c.Spec.Subscribable != nil { - channelConfig.FanoutConfig = fanout.Config{ - Subscriptions: c.Spec.Subscribable.Subscribers, - } - } - cc = append(cc, channelConfig) - } - return &multichannelfanout.Config{ - ChannelConfigs: cc, - } -} - -func (r *reconciler) listAllChannels(ctx context.Context) ([]eventingv1alpha1.Channel, error) { - clusterChannelProvisioner, err := r.getClusterChannelProvisioner() - if err != nil { - return nil, err - } - - channels := make([]eventingv1alpha1.Channel, 0) - - opts := &client.ListOptions{ - // Set Raw because if we need to get more than one page, then we will put the continue token - // into opts.Raw.Continue. - Raw: &metav1.ListOptions{}, - } - for { - cl := &eventingv1alpha1.ChannelList{} - if err = r.client.List(ctx, opts, cl); err != nil { - return nil, err - } - - for _, c := range cl.Items { - if r.shouldReconcile(&c, clusterChannelProvisioner) { - channels = append(channels, c) - } - } - if cl.Continue != "" { - opts.Raw.Continue = cl.Continue - } else { - return channels, nil - } - } -} - func createKafkaAdminClient(config *controller.KafkaProvisionerConfig) (sarama.ClusterAdmin, error) { saramaConf := sarama.NewConfig() saramaConf.Version = sarama.V1_1_0_0 diff --git a/contrib/kafka/pkg/controller/channel/reconcile_test.go b/contrib/kafka/pkg/controller/channel/reconcile_test.go index 67253e20ce6..bff18603f02 100644 --- a/contrib/kafka/pkg/controller/channel/reconcile_test.go +++ b/contrib/kafka/pkg/controller/channel/reconcile_test.go @@ -30,10 +30,12 @@ import ( eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" "github.com/knative/eventing/pkg/provisioners" util "github.com/knative/eventing/pkg/provisioners" + "github.com/knative/eventing/pkg/reconciler/names" controllertesting "github.com/knative/eventing/pkg/reconciler/testing" "github.com/knative/eventing/pkg/utils" duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1" istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3" + "github.com/knative/pkg/system" _ "github.com/knative/pkg/system/testing" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -142,13 +144,13 @@ var testCases = []controllertesting.TestCase{ InitialState: []runtime.Object{ getNewClusterChannelProvisioner(clusterChannelProvisionerName, true), getNewChannel(channelName, clusterChannelProvisionerName), - makeVirtualService(), }, WantResult: reconcile.Result{ Requeue: true, }, WantPresent: []runtime.Object{ getNewChannelWithStatusAndFinalizer(channelName, clusterChannelProvisionerName), + makeK8sService(), }, }, { @@ -156,7 +158,6 @@ var testCases = []controllertesting.TestCase{ InitialState: []runtime.Object{ getNewClusterChannelProvisioner(clusterChannelProvisionerName, true), getNewChannelWithStatusAndFinalizer(channelName, clusterChannelProvisionerName), - makeVirtualService(), }, WantPresent: []runtime.Object{ getNewChannelProvisionedStatus(channelName, 
clusterChannelProvisionerName), @@ -523,18 +524,32 @@ func getNewClusterChannelProvisioner(name string, isReady bool) *eventingv1alpha return clusterChannelProvisioner } -func makeVirtualService() *istiov1alpha3.VirtualService { - return &istiov1alpha3.VirtualService{ +func om(namespace, name string) metav1.ObjectMeta { + return metav1.ObjectMeta{ + Namespace: namespace, + Name: name, + SelfLink: fmt.Sprintf("/apis/eventing/v1alpha1/namespaces/%s/object/%s", namespace, name), + } +} + +func getControllerConfig() *controller.KafkaProvisionerConfig { + return &controller.KafkaProvisionerConfig{ + Brokers: []string{"test-broker"}, + } +} + +func makeK8sService() *corev1.Service { + return &corev1.Service{ TypeMeta: metav1.TypeMeta{ - APIVersion: istiov1alpha3.SchemeGroupVersion.String(), - Kind: "VirtualService", + APIVersion: "v1", + Kind: "Service", }, ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("%s-channel", testNS), - Namespace: testNS, + GenerateName: fmt.Sprintf("%s-channel-", channelName), + Namespace: testNS, Labels: map[string]string{ - "channel": channelName, - "provisioner": clusterChannelProvisionerName, + util.EventingChannelLabel: channelName, + util.OldEventingChannelLabel: channelName, }, OwnerReferences: []metav1.OwnerReference{ { @@ -547,38 +562,9 @@ func makeVirtualService() *istiov1alpha3.VirtualService { }, }, }, - Spec: istiov1alpha3.VirtualServiceSpec{ - Hosts: []string{ - serviceAddress, - fmt.Sprintf("%s.%s.channels.%s", channelName, testNS, utils.GetClusterDomainName()), - }, - HTTP: []istiov1alpha3.HTTPRoute{{ - Rewrite: &istiov1alpha3.HTTPRewrite{ - Authority: fmt.Sprintf("%s.%s.channels.%s", channelName, testNS, utils.GetClusterDomainName()), - }, - Route: []istiov1alpha3.DestinationWeight{{ - Destination: istiov1alpha3.Destination{ - Host: "kafka-provisioner.knative-testing.svc." + utils.GetClusterDomainName(), - Port: istiov1alpha3.PortSelector{ - Number: util.PortNumber, - }, - }}, - }}, - }, + Spec: corev1.ServiceSpec{ + ExternalName: names.ServiceHostName(fmt.Sprintf("%s-dispatcher", clusterChannelProvisionerName), system.Namespace()), + Type: "ExternalName", }, } } - -func om(namespace, name string) metav1.ObjectMeta { - return metav1.ObjectMeta{ - Namespace: namespace, - Name: name, - SelfLink: fmt.Sprintf("/apis/eventing/v1alpha1/namespaces/%s/object/%s", namespace, name), - } -} - -func getControllerConfig() *controller.KafkaProvisionerConfig { - return &controller.KafkaProvisionerConfig{ - Brokers: []string{"test-broker"}, - } -} diff --git a/pkg/provisioners/channel_util.go b/pkg/provisioners/channel_util.go index 1fc8026dd27..fd9caf06d52 100644 --- a/pkg/provisioners/channel_util.go +++ b/pkg/provisioners/channel_util.go @@ -69,6 +69,7 @@ type k8sServiceOption func(*corev1.Service) error func ExternalService(c *eventingv1alpha1.Channel) k8sServiceOption { return func(svc *corev1.Service) error { svc.Spec = corev1.ServiceSpec{ + ClusterIP: "", Type: "ExternalName", ExternalName: names.ServiceHostName(channelDispatcherServiceName(c.Spec.Provisioner.Name), system.Namespace()), } @@ -125,7 +126,7 @@ func createK8sService(ctx context.Context, client runtimeClient.Client, getSvc g } // spec.clusterIP is immutable and is set on existing services. If we don't set this // to the same value, we will encounter an error while updating. 
- svc.Spec.ClusterIP = current.Spec.ClusterIP + // svc.Spec.ClusterIP = current.Spec.ClusterIP if !equality.Semantic.DeepDerivative(svc.Spec, current.Spec) || !expectedLabelsPresent(current.ObjectMeta.Labels, svc.ObjectMeta.Labels) || // This DeepEqual is necessary to force update dispatcher services when upgrading from 0.5 to 0.6. diff --git a/pkg/reconciler/v1alpha1/broker/resources/ingress.go b/pkg/reconciler/v1alpha1/broker/resources/ingress.go index 8df5a57f841..39084b4b6d5 100644 --- a/pkg/reconciler/v1alpha1/broker/resources/ingress.go +++ b/pkg/reconciler/v1alpha1/broker/resources/ingress.go @@ -58,10 +58,6 @@ func MakeIngress(args *IngressArgs) *appsv1.Deployment { Template: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Labels: ingressLabels(args.Broker), - // TODO: Remove this annotation once all channels stop using istio virtual service - Annotations: map[string]string{ - "sidecar.istio.io/inject": "true", - }, }, Spec: corev1.PodSpec{ ServiceAccountName: args.ServiceAccountName, diff --git a/pkg/sidecar/multichannelfanout/config.go b/pkg/sidecar/multichannelfanout/config.go new file mode 100644 index 00000000000..84f29b85089 --- /dev/null +++ b/pkg/sidecar/multichannelfanout/config.go @@ -0,0 +1,67 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package multichannelfanout provides an http.Handler that takes in one request to a Knative +// Channel and fans it out to N other requests. Logically, it represents multiple Knative Channels. +// It is made up of a map, map[channel]fanout.Handler and each incoming request is inspected to +// determine which Channel it is on. This Handler delegates the HTTP handling to the fanout.Handler +// corresponding to the incoming request's Channel. +// It is often used in conjunction with a swappable.Handler. The swappable.Handler delegates all its +// requests to the multichannelfanout.Handler. When a new configuration is available, a new +// multichannelfanout.Handler is created and swapped in for all subsequent requests. The old +// multichannelfanout.Handler is discarded. + +package multichannelfanout + +import ( + "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" + "github.com/knative/eventing/pkg/sidecar/fanout" +) + +// Config for a multichannelfanout.Handler. +type Config struct { + // The configuration of each channel in this handler. + ChannelConfigs []ChannelConfig `json:"channelConfigs"` +} + +// ChannelConfig is the configuration for a single Channel. 
+type ChannelConfig struct {
+	Namespace    string        `json:"namespace"`
+	Name         string        `json:"name"`
+	HostName     string        `json:"hostname"`
+	FanoutConfig fanout.Config `json:"fanoutConfig"`
+}
+
+// NewConfigFromChannels creates a new Config from the list of channels
+func NewConfigFromChannels(channels []v1alpha1.Channel) *Config {
+	cc := make([]ChannelConfig, 0)
+	for _, c := range channels {
+		channelConfig := ChannelConfig{
+			Namespace: c.Namespace,
+			Name:      c.Name,
+			HostName:  c.Status.Address.Hostname,
+		}
+		if c.Spec.Subscribable != nil {
+			channelConfig.FanoutConfig = fanout.Config{
+				Subscriptions: c.Spec.Subscribable.Subscribers,
+			}
+		}
+		cc = append(cc, channelConfig)
+	}
+	return &Config{
+		ChannelConfigs: cc,
+	}
+}
diff --git a/pkg/sidecar/multichannelfanout/multi_channel_fanout_handler.go b/pkg/sidecar/multichannelfanout/multi_channel_fanout_handler.go
index 282a1c0985d..e4a7e1c9193 100644
--- a/pkg/sidecar/multichannelfanout/multi_channel_fanout_handler.go
+++ b/pkg/sidecar/multichannelfanout/multi_channel_fanout_handler.go
@@ -34,20 +34,6 @@ import (
 	"go.uber.org/zap"
 )
 
-// Config for a multichannelfanout.Handler.
-type Config struct {
-	// The configuration of each channel in this handler.
-	ChannelConfigs []ChannelConfig `json:"channelConfigs"`
-}
-
-// ChannelConfig is the configuration for a single Channel.
-type ChannelConfig struct {
-	Namespace    string        `json:"namespace"`
-	Name         string        `json:"name"`
-	HostName     string        `json:"hostname"`
-	FanoutConfig fanout.Config `json:"fanoutConfig"`
-}
-
 // makeChannelKeyFromConfig creates the channel key for a given channelConfig. It is a helper around
 // MakeChannelKey.
 func makeChannelKeyFromConfig(config ChannelConfig) string {
From 37bae81f1d6fc760d94ea100a7e6980d3bf48075 Mon Sep 17 00:00:00 2001
From: Akash Verenkar
Date: Fri, 12 Apr 2019 10:01:25 -0700
Subject: [PATCH 10/37] WIP kafka - UTs and E2E pass

More UTs needed
---
 cmd/fanoutsidecar/main.go                     |   4 +-
 contrib/gcppubsub/pkg/dispatcher/cmd/main.go  |   5 +-
 .../pkg/dispatcher/receiver/receiver.go       |  10 +-
 .../pkg/dispatcher/receiver/receiver_test.go  |  11 +-
 contrib/kafka/cmd/dispatcher/main.go          |   1 -
 contrib/kafka/config/kafka.yaml               |   7 ++
 .../pkg/controller/channel/reconcile_test.go  |   9 +-
 contrib/kafka/pkg/dispatcher/dispatcher.go    |  57 +++++++++-
 .../kafka/pkg/dispatcher/dispatcher_test.go   | 102 +++++++++++++++---
 .../pkg/dispatcher/dispatcher/dispatcher.go   |   6 +-
 pkg/channelwatcher/channel_watcher.go         |   4 +-
 pkg/provisioners/channel_util.go              |   3 +-
 .../inmemory/channel/reconcile.go             |  18 +---
 pkg/provisioners/inmemory/controller/main.go  |   3 +-
 pkg/provisioners/message_receiver.go          |  39 +++++--
 pkg/provisioners/message_receiver_test.go     |   5 +-
 pkg/provisioners/provisioner_util.go          |   5 +-
 pkg/sidecar/fanout/fanout_handler.go          |  11 +-
 pkg/sidecar/fanout/fanout_handler_test.go     |  13 ++-
 .../multi_channel_fanout_handler.go           |   6 +-
 20 files changed, 251 insertions(+), 68 deletions(-)

diff --git a/cmd/fanoutsidecar/main.go b/cmd/fanoutsidecar/main.go
index ed2fe9a2d6d..f5a803296a3 100644
--- a/cmd/fanoutsidecar/main.go
+++ b/cmd/fanoutsidecar/main.go
@@ -131,7 +131,9 @@ func setupChannelWatcher(logger *zap.Logger, configUpdated swappable.UpdateConfi
 		logger.Error("Error creating new manager.", zap.Error(err))
 		return nil, err
 	}
-	v1alpha1.AddToScheme(mgr.GetScheme())
+	if err := v1alpha1.AddToScheme(mgr.GetScheme()); err != nil {
+		logger.Error("Error adding eventing scheme to manager.", zap.Error(err))
+	}
 	channelwatcher.New(mgr, logger, updateChannelConfig(configUpdated))
 	return mgr, nil
 }
diff
--git a/contrib/gcppubsub/pkg/dispatcher/cmd/main.go b/contrib/gcppubsub/pkg/dispatcher/cmd/main.go index a2a82a4e638..078e08bf2d4 100644 --- a/contrib/gcppubsub/pkg/dispatcher/cmd/main.go +++ b/contrib/gcppubsub/pkg/dispatcher/cmd/main.go @@ -61,7 +61,10 @@ func main() { // PubSub) and the dispatcher (takes messages in PubSub and sends them in cluster) in this // binary. - _, runnables := receiver.New(logger.Desugar(), mgr.GetClient(), util.GcpPubSubClientCreator) + _, runnables, err := receiver.New(logger.Desugar(), mgr.GetClient(), util.GcpPubSubClientCreator) + if err != nil { + logger.Fatal("Unable to create new receiver and runnable", zap.Error(err)) + } for _, runnable := range runnables { err = mgr.Add(runnable) if err != nil { diff --git a/contrib/gcppubsub/pkg/dispatcher/receiver/receiver.go b/contrib/gcppubsub/pkg/dispatcher/receiver/receiver.go index 702ee7fd5f4..665bb80dda5 100644 --- a/contrib/gcppubsub/pkg/dispatcher/receiver/receiver.go +++ b/contrib/gcppubsub/pkg/dispatcher/receiver/receiver.go @@ -44,7 +44,7 @@ type Receiver struct { // New creates a new Receiver and its associated MessageReceiver. The caller is responsible for // Start()ing the returned MessageReceiver. -func New(logger *zap.Logger, client client.Client, pubSubClientCreator util.PubSubClientCreator) (*Receiver, []manager.Runnable) { +func New(logger *zap.Logger, client client.Client, pubSubClientCreator util.PubSubClientCreator) (*Receiver, []manager.Runnable, error) { r := &Receiver{ logger: logger, client: client, @@ -52,10 +52,14 @@ func New(logger *zap.Logger, client client.Client, pubSubClientCreator util.PubS pubSubClientCreator: pubSubClientCreator, cache: cache.NewTTL(), } - return r, []manager.Runnable{r.newMessageReceiver(), r.cache} + receiver, err := r.newMessageReceiver() + if err != nil { + return nil, nil, err + } + return r, []manager.Runnable{receiver, r.cache}, nil } -func (r *Receiver) newMessageReceiver() *provisioners.MessageReceiver { +func (r *Receiver) newMessageReceiver() (*provisioners.MessageReceiver, error) { return provisioners.NewMessageReceiver(r.sendEventToTopic, r.logger.Sugar()) } diff --git a/contrib/gcppubsub/pkg/dispatcher/receiver/receiver_test.go b/contrib/gcppubsub/pkg/dispatcher/receiver/receiver_test.go index 6d9b2353b30..d6d69db23b6 100644 --- a/contrib/gcppubsub/pkg/dispatcher/receiver/receiver_test.go +++ b/contrib/gcppubsub/pkg/dispatcher/receiver/receiver_test.go @@ -129,14 +129,21 @@ func TestReceiver(t *testing.T) { } for n, tc := range testCases { t.Run(n, func(t *testing.T) { - mr, _ := New( + mr, _, err := New( zap.NewNop(), fake.NewFakeClient(tc.initialState...), fakepubsub.Creator(tc.pubSubData)) + if err != nil { + t.Errorf("Error when creating a New receiver. Error:%s", err) + } resp := httptest.NewRecorder() req := httptest.NewRequest("POST", "/", strings.NewReader(validMessage)) req.Host = "test-channel.test-namespace.channels." + utils.GetClusterDomainName() - mr.newMessageReceiver().HandleRequest(resp, req) + receiver, err := mr.newMessageReceiver() + if err != nil { + t.Errorf("Error when creating a new message receiver. Error:%s", err) + } + receiver.HandleRequest(resp, req) if tc.expectedErr { if resp.Result().StatusCode >= 200 && resp.Result().StatusCode < 300 { t.Errorf("Expected an error. 
Actual: %v", resp.Result()) diff --git a/contrib/kafka/cmd/dispatcher/main.go b/contrib/kafka/cmd/dispatcher/main.go index 6748e52c088..9fe7cfabd52 100644 --- a/contrib/kafka/cmd/dispatcher/main.go +++ b/contrib/kafka/cmd/dispatcher/main.go @@ -43,7 +43,6 @@ func main() { if err != nil { log.Fatalf("unable to create logger: %v", err) } - provisionerConfig, err := provisionerController.GetProvisionerConfig("/etc/config-provisioner") if err != nil { logger.Fatal("unable to load provisioner config", zap.Error(err)) diff --git a/contrib/kafka/config/kafka.yaml b/contrib/kafka/config/kafka.yaml index d0b25649e00..82298b56519 100644 --- a/contrib/kafka/config/kafka.yaml +++ b/contrib/kafka/config/kafka.yaml @@ -170,6 +170,13 @@ rules: - get - list - watch + - apiGroups: + - eventing.knative.dev + resources: + - channels + verbs: + - list + - watch --- diff --git a/contrib/kafka/pkg/controller/channel/reconcile_test.go b/contrib/kafka/pkg/controller/channel/reconcile_test.go index bff18603f02..33fad32efb3 100644 --- a/contrib/kafka/pkg/controller/channel/reconcile_test.go +++ b/contrib/kafka/pkg/controller/channel/reconcile_test.go @@ -150,7 +150,6 @@ var testCases = []controllertesting.TestCase{ }, WantPresent: []runtime.Object{ getNewChannelWithStatusAndFinalizer(channelName, clusterChannelProvisionerName), - makeK8sService(), }, }, { @@ -161,6 +160,7 @@ var testCases = []controllertesting.TestCase{ }, WantPresent: []runtime.Object{ getNewChannelProvisionedStatus(channelName, clusterChannelProvisionerName), + makeK8sService(), }, }, { @@ -529,6 +529,7 @@ func om(namespace, name string) metav1.ObjectMeta { Namespace: namespace, Name: name, SelfLink: fmt.Sprintf("/apis/eventing/v1alpha1/namespaces/%s/object/%s", namespace, name), + UID: testUID, } } @@ -548,8 +549,10 @@ func makeK8sService() *corev1.Service { GenerateName: fmt.Sprintf("%s-channel-", channelName), Namespace: testNS, Labels: map[string]string{ - util.EventingChannelLabel: channelName, - util.OldEventingChannelLabel: channelName, + util.EventingChannelLabel: channelName, + util.OldEventingChannelLabel: channelName, + util.EventingProvisionerLabel: clusterChannelProvisionerName, + util.OldEventingProvisionerLabel: clusterChannelProvisionerName, }, OwnerReferences: []metav1.OwnerReference{ { diff --git a/contrib/kafka/pkg/dispatcher/dispatcher.go b/contrib/kafka/pkg/dispatcher/dispatcher.go index adad489be49..bfe4790d062 100644 --- a/contrib/kafka/pkg/dispatcher/dispatcher.go +++ b/contrib/kafka/pkg/dispatcher/dispatcher.go @@ -34,8 +34,9 @@ import ( ) type KafkaDispatcher struct { - config atomic.Value - updateLock sync.Mutex + config atomic.Value + hostToChannelMap atomic.Value + updateLock sync.Mutex receiver *provisioners.MessageReceiver dispatcher *provisioners.MessageDispatcher @@ -131,12 +132,36 @@ func (d *KafkaDispatcher) UpdateConfig(config *multichannelfanout.Config) error } } + hcMap, err := createHostToChannelMap(config) + if err != nil { + return err + } + d.setHostToChannelMap(hcMap) + // Update the config so that it can be used for comparison during next sync d.setConfig(config) + } return nil } +func createHostToChannelMap(config *multichannelfanout.Config) (map[string]provisioners.ChannelReference, error) { + hcMap := make(map[string]provisioners.ChannelReference) + for _, cConfig := range config.ChannelConfigs { + if cr, ok := hcMap[cConfig.HostName]; ok { + return nil, fmt.Errorf( + "Duplicate hostName found. 
HostName:%s, channel:%s.%s, channel:%s.%s", + cConfig.HostName, + cConfig.Namespace, + cConfig.Name, + cr.Namespace, + cr.Name) + } + hcMap[cConfig.HostName] = provisioners.ChannelReference{Name: cConfig.Name, Namespace: cConfig.Namespace} + } + return hcMap, nil +} + // Start starts the kafka dispatcher's message processing. func (d *KafkaDispatcher) Start(stopCh <-chan struct{}) error { if d.receiver == nil { @@ -258,8 +283,15 @@ func (d *KafkaDispatcher) setConfig(config *multichannelfanout.Config) { d.config.Store(config) } -func NewDispatcher(brokers []string, consumerMode cluster.ConsumerMode, logger *zap.Logger) (*KafkaDispatcher, error) { +func (d *KafkaDispatcher) getHostToChannelMap() map[string]provisioners.ChannelReference { + return d.hostToChannelMap.Load().(map[string]provisioners.ChannelReference) +} +func (d *KafkaDispatcher) setHostToChannelMap(hcMap map[string]provisioners.ChannelReference) { + d.hostToChannelMap.Store(hcMap) +} + +func NewDispatcher(brokers []string, consumerMode cluster.ConsumerMode, logger *zap.Logger) (*KafkaDispatcher, error) { conf := sarama.NewConfig() conf.Version = sarama.V1_1_0_0 conf.ClientID = controller.Name + "-dispatcher" @@ -282,16 +314,31 @@ func NewDispatcher(brokers []string, consumerMode cluster.ConsumerMode, logger * logger: logger, } - receiverFunc := provisioners.NewMessageReceiver( + receiverFunc, err := provisioners.NewMessageReceiver( func(channel provisioners.ChannelReference, message *provisioners.Message) error { dispatcher.kafkaAsyncProducer.Input() <- toKafkaMessage(channel, message) return nil - }, logger.Sugar()) + }, + logger.Sugar(), + provisioners.ResolveChannelFromHostHeader(provisioners.ResolveChannelFromHostFunc(dispatcher.getChannelReferenceFromHost))) + if err != nil { + return nil, err + } dispatcher.receiver = receiverFunc dispatcher.setConfig(&multichannelfanout.Config{}) + dispatcher.setHostToChannelMap(map[string]provisioners.ChannelReference{}) return dispatcher, nil } +func (d *KafkaDispatcher) getChannelReferenceFromHost(host string) (provisioners.ChannelReference, error) { + chMap := d.getHostToChannelMap() + cr, ok := chMap[host] + if !ok { + return cr, fmt.Errorf("Invalid HostName:%s. 
HostName not found in ConfigMap for any Channel", host) + } + return cr, nil +} + func fromKafkaMessage(kafkaMessage *sarama.ConsumerMessage) *provisioners.Message { headers := make(map[string]string) for _, header := range kafkaMessage.Headers { diff --git a/contrib/kafka/pkg/dispatcher/dispatcher_test.go b/contrib/kafka/pkg/dispatcher/dispatcher_test.go index 8c333e7c56e..9d0a32222fb 100644 --- a/contrib/kafka/pkg/dispatcher/dispatcher_test.go +++ b/contrib/kafka/pkg/dispatcher/dispatcher_test.go @@ -183,23 +183,29 @@ func (c *mockSaramaCluster) GetConsumerMode() cluster.ConsumerMode { func TestDispatcher_UpdateConfig(t *testing.T) { testCases := []struct { - name string - oldConfig *multichannelfanout.Config - newConfig *multichannelfanout.Config - subscribes []string - unsubscribes []string - createErr string + name string + oldConfig *multichannelfanout.Config + newConfig *multichannelfanout.Config + subscribes []string + unsubscribes []string + createErr string + oldHostToChanMap map[string]provisioners.ChannelReference + newHostToChanMap map[string]provisioners.ChannelReference }{ { - name: "nil config", - oldConfig: &multichannelfanout.Config{}, - newConfig: nil, - createErr: "nil config", + name: "nil config", + oldConfig: &multichannelfanout.Config{}, + newConfig: nil, + createErr: "nil config", + oldHostToChanMap: map[string]provisioners.ChannelReference{}, + newHostToChanMap: map[string]provisioners.ChannelReference{}, }, { - name: "same config", - oldConfig: &multichannelfanout.Config{}, - newConfig: &multichannelfanout.Config{}, + name: "same config", + oldConfig: &multichannelfanout.Config{}, + newConfig: &multichannelfanout.Config{}, + oldHostToChanMap: map[string]provisioners.ChannelReference{}, + newHostToChanMap: map[string]provisioners.ChannelReference{}, }, { name: "config with no subscription", @@ -209,9 +215,14 @@ func TestDispatcher_UpdateConfig(t *testing.T) { { Namespace: "default", Name: "test-channel", + HostName: "a.b.c.d", }, }, }, + oldHostToChanMap: map[string]provisioners.ChannelReference{}, + newHostToChanMap: map[string]provisioners.ChannelReference{ + "a.b.c.d": provisioners.ChannelReference{Name: "test-channel", Namespace: "default"}, + }, }, { name: "single channel w/ new subscriptions", @@ -221,6 +232,7 @@ func TestDispatcher_UpdateConfig(t *testing.T) { { Namespace: "default", Name: "test-channel", + HostName: "a.b.c.d", FanoutConfig: fanout.Config{ Subscriptions: []eventingduck.ChannelSubscriberSpec{ { @@ -240,7 +252,11 @@ func TestDispatcher_UpdateConfig(t *testing.T) { }, }, }, - subscribes: []string{"subscription-1", "subscription-2"}, + subscribes: []string{"subscription-1", "subscription-2"}, + oldHostToChanMap: map[string]provisioners.ChannelReference{}, + newHostToChanMap: map[string]provisioners.ChannelReference{ + "a.b.c.d": provisioners.ChannelReference{Name: "test-channel", Namespace: "default"}, + }, }, { name: "single channel w/ existing subscriptions", @@ -249,6 +265,7 @@ func TestDispatcher_UpdateConfig(t *testing.T) { { Namespace: "default", Name: "test-channel", + HostName: "a.b.c.d", FanoutConfig: fanout.Config{ Subscriptions: []eventingduck.ChannelSubscriberSpec{ { @@ -269,6 +286,7 @@ func TestDispatcher_UpdateConfig(t *testing.T) { { Namespace: "default", Name: "test-channel", + HostName: "a.b.c.d", FanoutConfig: fanout.Config{ Subscriptions: []eventingduck.ChannelSubscriberSpec{ { @@ -290,6 +308,12 @@ func TestDispatcher_UpdateConfig(t *testing.T) { }, subscribes: []string{"subscription-2", "subscription-3"}, unsubscribes: 
[]string{"subscription-1"}, + oldHostToChanMap: map[string]provisioners.ChannelReference{ + "a.b.c.d": provisioners.ChannelReference{Name: "test-channel", Namespace: "default"}, + }, + newHostToChanMap: map[string]provisioners.ChannelReference{ + "a.b.c.d": provisioners.ChannelReference{Name: "test-channel", Namespace: "default"}, + }, }, { name: "multi channel w/old and new subscriptions", @@ -298,6 +322,7 @@ func TestDispatcher_UpdateConfig(t *testing.T) { { Namespace: "default", Name: "test-channel-1", + HostName: "a.b.c.d", FanoutConfig: fanout.Config{ Subscriptions: []eventingduck.ChannelSubscriberSpec{ { @@ -319,6 +344,7 @@ func TestDispatcher_UpdateConfig(t *testing.T) { { Namespace: "default", Name: "test-channel-1", + HostName: "a.b.c.d", FanoutConfig: fanout.Config{ Subscriptions: []eventingduck.ChannelSubscriberSpec{ { @@ -333,6 +359,7 @@ func TestDispatcher_UpdateConfig(t *testing.T) { { Namespace: "default", Name: "test-channel-2", + HostName: "e.f.g.h", FanoutConfig: fanout.Config{ Subscriptions: []eventingduck.ChannelSubscriberSpec{ { @@ -354,6 +381,33 @@ func TestDispatcher_UpdateConfig(t *testing.T) { }, subscribes: []string{"subscription-1", "subscription-3", "subscription-4"}, unsubscribes: []string{"subscription-2"}, + oldHostToChanMap: map[string]provisioners.ChannelReference{ + "a.b.c.d": provisioners.ChannelReference{Name: "test-channel-1", Namespace: "default"}, + }, + newHostToChanMap: map[string]provisioners.ChannelReference{ + "a.b.c.d": provisioners.ChannelReference{Name: "test-channel-1", Namespace: "default"}, + "e.f.g.h": provisioners.ChannelReference{Name: "test-channel-2", Namespace: "default"}, + }, + }, + { + name: "Duplicate hostnames", + oldConfig: &multichannelfanout.Config{}, + newConfig: &multichannelfanout.Config{ + ChannelConfigs: []multichannelfanout.ChannelConfig{ + { + Namespace: "default", + Name: "test-channel-1", + HostName: "a.b.c.d", + }, + { + Namespace: "default", + Name: "test-channel-2", + HostName: "a.b.c.d", + }, + }, + }, + createErr: "Duplicate hostName found. 
HostName:a.b.c.d, channel:default.test-channel-2, channel:default.test-channel-1", + oldHostToChanMap: map[string]provisioners.ChannelReference{}, }, } @@ -367,10 +421,12 @@ func TestDispatcher_UpdateConfig(t *testing.T) { logger: zap.NewNop(), } d.setConfig(&multichannelfanout.Config{}) + d.setHostToChannelMap(map[string]provisioners.ChannelReference{}) // Initialize using oldConfig err := d.UpdateConfig(tc.oldConfig) if err != nil { + t.Errorf("unexpected error: %v", err) } oldSubscribers := sets.NewString() @@ -382,6 +438,12 @@ func TestDispatcher_UpdateConfig(t *testing.T) { if diff := sets.NewString(tc.unsubscribes...).Difference(oldSubscribers); diff.Len() != 0 { t.Errorf("subscriptions %+v were never subscribed", diff) } + if diff := cmp.Diff(tc.oldConfig, d.getConfig()); diff != "" { + t.Errorf("unexpected config (-want, +got) = %v", diff) + } + if diff := cmp.Diff(tc.oldHostToChanMap, d.getHostToChannelMap()); diff != "" { + t.Errorf("unexpected hostToChannelMap (-want, +got) = %v", diff) + } // Update with new config err = d.UpdateConfig(tc.newConfig) @@ -406,6 +468,12 @@ func TestDispatcher_UpdateConfig(t *testing.T) { if diff := cmp.Diff(tc.subscribes, newSubscribers, sortStrings); diff != "" { t.Errorf("unexpected subscribers (-want, +got) = %v", diff) } + if diff := cmp.Diff(tc.newHostToChanMap, d.getHostToChannelMap()); diff != "" { + t.Errorf("unexpected hostToChannelMap (-want, +got) = %v", diff) + } + if diff := cmp.Diff(tc.newConfig, d.getConfig()); diff != "" { + t.Errorf("unexpected config (-want, +got) = %v", diff) + } }) } @@ -631,9 +699,13 @@ func TestKafkaDispatcher_Start(t *testing.T) { t.Errorf("Expected error want %s, got %s", "message receiver is not set", err) } - d.receiver = provisioners.NewMessageReceiver(func(channel provisioners.ChannelReference, message *provisioners.Message) error { + receiver, err := provisioners.NewMessageReceiver(func(channel provisioners.ChannelReference, message *provisioners.Message) error { return nil }, zap.NewNop().Sugar()) + if err != nil { + t.Fatalf("Error creating new message receiver. 
Error:%s", err) + } + d.receiver = receiver err = d.Start(make(chan struct{})) if err == nil { t.Errorf("Expected error want %s, got %s", "kafkaAsyncProducer is not set", err) diff --git a/contrib/natss/pkg/dispatcher/dispatcher/dispatcher.go b/contrib/natss/pkg/dispatcher/dispatcher/dispatcher.go index fa6675b32cb..e2ca29c959d 100644 --- a/contrib/natss/pkg/dispatcher/dispatcher/dispatcher.go +++ b/contrib/natss/pkg/dispatcher/dispatcher/dispatcher.go @@ -71,7 +71,11 @@ func NewDispatcher(natssURL, clusterID string, logger *zap.Logger) (*Subscriptio clusterID: clusterID, subscriptions: make(map[provisioners.ChannelReference]map[subscriptionReference]*stan.Subscription), } - d.receiver = provisioners.NewMessageReceiver(createReceiverFunction(d, logger.Sugar()), logger.Sugar()) + receiver, err := provisioners.NewMessageReceiver(createReceiverFunction(d, logger.Sugar()), logger.Sugar()) + if err != nil { + return nil, err + } + d.receiver = receiver return d, nil } diff --git a/pkg/channelwatcher/channel_watcher.go b/pkg/channelwatcher/channel_watcher.go index 1b9f7dcbb2a..b9a77670ce8 100644 --- a/pkg/channelwatcher/channel_watcher.go +++ b/pkg/channelwatcher/channel_watcher.go @@ -26,9 +26,9 @@ type reconciler struct { func (r *reconciler) Reconcile(req reconcile.Request) (reconcile.Result, error) { ctx := logging.WithLogger(context.TODO(), r.logger.With(zap.Any("request", req))) - r.logger.Info("New update for channel.") + logging.FromContext(ctx).Info("New update for channel.") if err := r.handler(ctx, r.client, req.NamespacedName); err != nil { - r.logger.Error("WatchHandlerFunc returned error", zap.Error(err)) + logging.FromContext(ctx).Error("WatchHandlerFunc returned error", zap.Error(err)) return reconcile.Result{}, err } return reconcile.Result{}, nil diff --git a/pkg/provisioners/channel_util.go b/pkg/provisioners/channel_util.go index 3ca480a6582..f0705452d44 100644 --- a/pkg/provisioners/channel_util.go +++ b/pkg/provisioners/channel_util.go @@ -65,7 +65,7 @@ func RemoveFinalizer(o metav1.Object, finalizerName string) { type k8sServiceOption func(*corev1.Service) error -// ExternalService is a functional option for CreateK8sService to create a K8s service of type ExternalName +// ExternalService is a functional option for CreateK8sService to create a K8s service of type ExternalName. func ExternalService(c *eventingv1alpha1.Channel) k8sServiceOption { return func(svc *corev1.Service) error { svc.Spec = corev1.ServiceSpec{ @@ -132,6 +132,7 @@ func createK8sService(ctx context.Context, client runtimeClient.Client, getSvc g !expectedLabelsPresent(current.ObjectMeta.Labels, svc.ObjectMeta.Labels) || // This DeepEqual is necessary to force update dispatcher services when upgrading from 0.5 to 0.6. 
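			// Rough illustration of the DeepDerivative/DeepEqual difference this guards against,
			// assuming apimachinery's semantic equality helpers (the port values are made up):
			//
			//	oldPorts := []corev1.ServicePort{{Name: "http", Port: 80}} // created by a pre-0.6 release
			//	newPorts := []corev1.ServicePort{{Port: 80}}               // Name dropped in 0.6
			//	equality.Semantic.DeepDerivative(newPorts, oldPorts)       // true  -> would skip the update
			//	equality.Semantic.DeepEqual(newPorts, oldPorts)            // false -> forces the update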
// Above DeepDerivative will not work because we have removed an optional field (name) from ports + // TODO: Remove this check in 0.7+ !equality.Semantic.DeepEqual(svc.Spec.Ports, current.Spec.Ports) { current.Spec = svc.Spec current.ObjectMeta.Labels = addExpectedLabels(current.ObjectMeta.Labels, svc.ObjectMeta.Labels) diff --git a/pkg/provisioners/inmemory/channel/reconcile.go b/pkg/provisioners/inmemory/channel/reconcile.go index fffbdc34c64..2800027f485 100644 --- a/pkg/provisioners/inmemory/channel/reconcile.go +++ b/pkg/provisioners/inmemory/channel/reconcile.go @@ -95,11 +95,12 @@ func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, err r.recorder.Eventf(c, corev1.EventTypeNormal, channelReconciled, "Channel reconciled: %q", c.Name) } - if updateStatusErr := util.UpdateChannel(ctx, r.client, c); updateStatusErr != nil { + if updateStatusErr := r.client.Status().Update(ctx, c); updateStatusErr != nil { logger.Info("Error updating Channel Status", zap.Error(updateStatusErr)) r.recorder.Eventf(c, corev1.EventTypeWarning, channelUpdateStatusFailed, "Failed to update Channel's status: %v", err) return reconcile.Result{}, updateStatusErr } + return reconcile.Result{}, err } @@ -117,18 +118,9 @@ func (r *reconciler) reconcile(ctx context.Context, c *eventingv1alpha1.Channel) c.Status.InitializeConditions() - // We are syncing three things: - // 1. The K8s Service to talk to this Channel. - // 3. The configuration of all Channel subscriptions. - - if c.DeletionTimestamp != nil { - // K8s garbage collection will delete the K8s service for this channel. - // We use a finalizer to ensure the channel config has been synced. - util.RemoveFinalizer(c, finalizerName) - return nil - } - - util.AddFinalizer(c, finalizerName) + // We are syncing the following: + // The K8s Service to talk to this Channel. + // The configuration of all Channel subscriptions. svc, err := util.CreateK8sService(ctx, r.client, c, util.ExternalService(c)) if err != nil { diff --git a/pkg/provisioners/inmemory/controller/main.go b/pkg/provisioners/inmemory/controller/main.go index 99ee64f885e..fbbdd262dff 100644 --- a/pkg/provisioners/inmemory/controller/main.go +++ b/pkg/provisioners/inmemory/controller/main.go @@ -29,9 +29,8 @@ import ( "go.uber.org/zap" "sigs.k8s.io/controller-runtime/pkg/client/config" "sigs.k8s.io/controller-runtime/pkg/manager" - // uncomment this line to debug in GKE from local machine - _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + //_ "k8s.io/client-go/plugin/pkg/client/auth/gcp" ) func main() { diff --git a/pkg/provisioners/message_receiver.go b/pkg/provisioners/message_receiver.go index 3874fded80b..b4fceb1022c 100644 --- a/pkg/provisioners/message_receiver.go +++ b/pkg/provisioners/message_receiver.go @@ -34,16 +34,30 @@ const ( // Message receiver receives messages. type MessageReceiver struct { - receiverFunc func(ChannelReference, *Message) error - forwardHeaders sets.String - forwardPrefixes []string + receiverFunc func(ChannelReference, *Message) error + forwardHeaders sets.String + forwardPrefixes []string + logger *zap.SugaredLogger + hostToChannelFunc ResolveChannelFromHostFunc +} + +type receiverOptions func(*MessageReceiver) error - logger *zap.SugaredLogger +type ResolveChannelFromHostFunc func(string) (ChannelReference, error) + +// ResolveChannelFromHostHeader is a receiverOption that enables the consumer of the MessageReceiver +// to pass a map[]ChannelReference. 
This map will then be used to to get the ChannelReference +// from httpRequest.Host before calling receiverFunc +func ResolveChannelFromHostHeader(hostToChannelFunc ResolveChannelFromHostFunc) receiverOptions { + return func(r *MessageReceiver) error { + r.hostToChannelFunc = hostToChannelFunc + return nil + } } // NewMessageReceiver creates a message receiver passing new messages to the // receiverFunc. -func NewMessageReceiver(receiverFunc func(ChannelReference, *Message) error, logger *zap.SugaredLogger) *MessageReceiver { +func NewMessageReceiver(receiverFunc func(ChannelReference, *Message) error, logger *zap.SugaredLogger, opts ...receiverOptions) (*MessageReceiver, error) { receiver := &MessageReceiver{ receiverFunc: receiverFunc, forwardHeaders: sets.NewString(forwardHeaders...), @@ -51,7 +65,16 @@ func NewMessageReceiver(receiverFunc func(ChannelReference, *Message) error, log logger: logger, } - return receiver + for _, opt := range opts { + if err := opt(receiver); err != nil { + return nil, err + } + } + // Default to old behaviour of host = channelName.channelNamespace + if receiver.hostToChannelFunc == nil { + receiver.hostToChannelFunc = ResolveChannelFromHostFunc(ParseChannel) + } + return receiver, nil } // Start begings to receive messages for the receiver. @@ -116,13 +139,13 @@ func (r *MessageReceiver) handler() http.Handler { func (r *MessageReceiver) HandleRequest(res http.ResponseWriter, req *http.Request) { host := req.Host r.logger.Infof("Received request for %s", host) - channel, err := ParseChannel(host) + channel, err := r.hostToChannelFunc(host) if err != nil { r.logger.Info("Could not extract channel", zap.Error(err)) res.WriteHeader(http.StatusInternalServerError) return } - + r.logger.Infof("Request mapped to channel: %s", channel.String()) message, err := r.fromRequest(req) if err != nil { res.WriteHeader(http.StatusInternalServerError) diff --git a/pkg/provisioners/message_receiver_test.go b/pkg/provisioners/message_receiver_test.go index 8cd8ca9bfbe..e4ec33a718a 100644 --- a/pkg/provisioners/message_receiver_test.go +++ b/pkg/provisioners/message_receiver_test.go @@ -126,7 +126,10 @@ func TestMessageReceiver_HandleRequest(t *testing.T) { } f := tc.receiverFunc - r := NewMessageReceiver(f, zap.NewNop().Sugar()) + r, err := NewMessageReceiver(f, zap.NewNop().Sugar()) + if err != nil { + t.Fatalf("Error creating new message receiver. Error:%s", err) + } h := r.handler() body := tc.bodyReader diff --git a/pkg/provisioners/provisioner_util.go b/pkg/provisioners/provisioner_util.go index a65551fcc3f..8ef70596ed2 100644 --- a/pkg/provisioners/provisioner_util.go +++ b/pkg/provisioners/provisioner_util.go @@ -80,7 +80,8 @@ func newDispatcherService(ccp *eventingv1alpha1.ClusterChannelProvisioner, opts { // There is a bug in Istio where named port doesn't work when connecting using an ExternalName service // Refer to https://github.com/istio/istio/issues/13193 for more details. 
- // TODO: Revert this when ISTIO fixes the issue + // TODO: uncomment Name:"http" when ISTIO fixes the issue + // Name: "http", Port: 80, Protocol: corev1.ProtocolTCP, TargetPort: intstr.FromInt(8080), @@ -91,7 +92,7 @@ func newDispatcherService(ccp *eventingv1alpha1.ClusterChannelProvisioner, opts for _, opt := range opts { if err := opt(svc); err != nil { - return svc, err + return nil, err } } return svc, nil diff --git a/pkg/sidecar/fanout/fanout_handler.go b/pkg/sidecar/fanout/fanout_handler.go index 2fd4ae97f9a..bd67bfe32c8 100644 --- a/pkg/sidecar/fanout/fanout_handler.go +++ b/pkg/sidecar/fanout/fanout_handler.go @@ -69,7 +69,7 @@ type forwardMessage struct { } // NewHandler creates a new fanout.Handler. -func NewHandler(logger *zap.Logger, config Config) *Handler { +func NewHandler(logger *zap.Logger, config Config) (*Handler, error) { handler := &Handler{ logger: logger, config: config, @@ -79,9 +79,12 @@ func NewHandler(logger *zap.Logger, config Config) *Handler { } // The receiver function needs to point back at the handler itself, so set it up after // initialization. - handler.receiver = provisioners.NewMessageReceiver(createReceiverFunction(handler), logger.Sugar()) - - return handler + receiver, err := provisioners.NewMessageReceiver(createReceiverFunction(handler), logger.Sugar()) + if err != nil { + return nil, err + } + handler.receiver = receiver + return handler, nil } func createReceiverFunction(f *Handler) func(provisioners.ChannelReference, *provisioners.Message) error { diff --git a/pkg/sidecar/fanout/fanout_handler_test.go b/pkg/sidecar/fanout/fanout_handler_test.go index 03b756ca8d9..1163144c8e4 100644 --- a/pkg/sidecar/fanout/fanout_handler_test.go +++ b/pkg/sidecar/fanout/fanout_handler_test.go @@ -225,12 +225,21 @@ func TestFanoutHandler_ServeHTTP(t *testing.T) { subs = append(subs, sub) } - h := NewHandler(zap.NewNop(), Config{Subscriptions: subs}) + h, err := NewHandler(zap.NewNop(), Config{Subscriptions: subs}) + if err != nil { + t.Errorf("NewHandler failed. Error:%s", err) + t.FailNow() + } if tc.asyncHandler { h.config.AsyncHandler = true } if tc.receiverFunc != nil { - h.receiver = provisioners.NewMessageReceiver(tc.receiverFunc, zap.NewNop().Sugar()) + receiver, err := provisioners.NewMessageReceiver(tc.receiverFunc, zap.NewNop().Sugar()) + if err != nil { + t.Errorf("NewMessageReceiver failed. 
Error:%s", err) + t.FailNow() + } + h.receiver = receiver } if tc.timeout != 0 { h.timeout = tc.timeout diff --git a/pkg/sidecar/multichannelfanout/multi_channel_fanout_handler.go b/pkg/sidecar/multichannelfanout/multi_channel_fanout_handler.go index e4a7e1c9193..c14cd53725c 100644 --- a/pkg/sidecar/multichannelfanout/multi_channel_fanout_handler.go +++ b/pkg/sidecar/multichannelfanout/multi_channel_fanout_handler.go @@ -60,7 +60,11 @@ func NewHandler(logger *zap.Logger, conf Config) (*Handler, error) { for _, cc := range conf.ChannelConfigs { key := makeChannelKeyFromConfig(cc) - handler := fanout.NewHandler(logger, cc.FanoutConfig) + handler, err := fanout.NewHandler(logger, cc.FanoutConfig) + if err != nil { + logger.Error("Failed creating new fanout handler.", zap.Error(err)) + return nil, err + } if _, present := handlers[key]; present { logger.Error("Duplicate channel key", zap.String("channelKey", key)) return nil, fmt.Errorf("duplicate channel key: %v", key) From feb5e64feb4e7a837eb7a553a14d91b6d47517d7 Mon Sep 17 00:00:00 2001 From: Akash Verenkar Date: Fri, 12 Apr 2019 12:01:55 -0700 Subject: [PATCH 11/37] Updated code based on PR comments --- cmd/fanoutsidecar/main.go | 7 ++- pkg/channelwatcher/channel_watcher.go | 4 +- pkg/provisioners/channel_util.go | 3 +- .../inmemory/channel/reconcile.go | 29 ++++++------ .../inmemory/channel/reconcile_test.go | 44 +------------------ pkg/provisioners/inmemory/controller/main.go | 3 +- pkg/provisioners/provisioner_util.go | 5 ++- .../v1alpha1/broker/resources/ingress.go | 1 + 8 files changed, 31 insertions(+), 65 deletions(-) diff --git a/cmd/fanoutsidecar/main.go b/cmd/fanoutsidecar/main.go index 6392dd91e57..54fa882d9a6 100644 --- a/cmd/fanoutsidecar/main.go +++ b/cmd/fanoutsidecar/main.go @@ -82,7 +82,7 @@ func main() { } if len(channelProvisioners) < 1 { - logger.Fatal("--channel_provisioners must be specified") + logger.Fatal("--channel_provisioner must be specified") } sh, err := swappable.NewEmptyHandler(logger) @@ -132,7 +132,10 @@ func setupChannelWatcher(logger *zap.Logger, configUpdated swappable.UpdateConfi logger.Error("Error creating new maanger.", zap.Error(err)) return nil, err } - v1alpha1.AddToScheme(mgr.GetScheme()) + if err = v1alpha1.AddToScheme(mgr.GetScheme()); err != nil { + logger.Error("Error while adding eventing scheme to manager.", zap.Error(err)) + return nil, err + } channelwatcher.New(mgr, logger, updateChannelConfig(configUpdated)) return mgr, nil diff --git a/pkg/channelwatcher/channel_watcher.go b/pkg/channelwatcher/channel_watcher.go index 1b9f7dcbb2a..b9a77670ce8 100644 --- a/pkg/channelwatcher/channel_watcher.go +++ b/pkg/channelwatcher/channel_watcher.go @@ -26,9 +26,9 @@ type reconciler struct { func (r *reconciler) Reconcile(req reconcile.Request) (reconcile.Result, error) { ctx := logging.WithLogger(context.TODO(), r.logger.With(zap.Any("request", req))) - r.logger.Info("New update for channel.") + logging.FromContext(ctx).Info("New update for channel.") if err := r.handler(ctx, r.client, req.NamespacedName); err != nil { - r.logger.Error("WatchHandlerFunc returned error", zap.Error(err)) + logging.FromContext(ctx).Error("WatchHandlerFunc returned error", zap.Error(err)) return reconcile.Result{}, err } return reconcile.Result{}, nil diff --git a/pkg/provisioners/channel_util.go b/pkg/provisioners/channel_util.go index 3ca480a6582..f0705452d44 100644 --- a/pkg/provisioners/channel_util.go +++ b/pkg/provisioners/channel_util.go @@ -65,7 +65,7 @@ func RemoveFinalizer(o metav1.Object, finalizerName 
string) { type k8sServiceOption func(*corev1.Service) error -// ExternalService is a functional option for CreateK8sService to create a K8s service of type ExternalName +// ExternalService is a functional option for CreateK8sService to create a K8s service of type ExternalName. func ExternalService(c *eventingv1alpha1.Channel) k8sServiceOption { return func(svc *corev1.Service) error { svc.Spec = corev1.ServiceSpec{ @@ -132,6 +132,7 @@ func createK8sService(ctx context.Context, client runtimeClient.Client, getSvc g !expectedLabelsPresent(current.ObjectMeta.Labels, svc.ObjectMeta.Labels) || // This DeepEqual is necessary to force update dispatcher services when upgrading from 0.5 to 0.6. // Above DeepDerivative will not work because we have removed an optional field (name) from ports + // TODO: Remove this check in 0.7+ !equality.Semantic.DeepEqual(svc.Spec.Ports, current.Spec.Ports) { current.Spec = svc.Spec current.ObjectMeta.Labels = addExpectedLabels(current.ObjectMeta.Labels, svc.ObjectMeta.Labels) diff --git a/pkg/provisioners/inmemory/channel/reconcile.go b/pkg/provisioners/inmemory/channel/reconcile.go index fffbdc34c64..630f22b4d6b 100644 --- a/pkg/provisioners/inmemory/channel/reconcile.go +++ b/pkg/provisioners/inmemory/channel/reconcile.go @@ -85,6 +85,19 @@ func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, err } logger.Info("Reconciling Channel") + // Finalizer needs to be removed (even though no finalizers are added) main back compat + // with v0.5 in which a finalzier was added. Or else channels will not get deleted after upgrading to 0.6 + // TODO: Remove this entire if block in v0.7+ + if c.DeletionTimestamp != nil { + // K8s garbage collection will delete the K8s service and VirtualService for this channel. + // We use a finalizer to ensure the channel config has been synced. + util.RemoveFinalizer(c, finalizerName) + r.client.Update(ctx, c) + logger.Info("Channel reconciled") + r.recorder.Eventf(c, corev1.EventTypeNormal, channelReconciled, "Channel reconciled: %q", c.Name) + return reconcile.Result{}, nil + } + err = r.reconcile(ctx, c) if err != nil { logger.Info("Error reconciling Channel", zap.Error(err)) @@ -95,7 +108,7 @@ func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, err r.recorder.Eventf(c, corev1.EventTypeNormal, channelReconciled, "Channel reconciled: %q", c.Name) } - if updateStatusErr := util.UpdateChannel(ctx, r.client, c); updateStatusErr != nil { + if updateStatusErr := r.client.Status().Update(ctx, c); updateStatusErr != nil { logger.Info("Error updating Channel Status", zap.Error(updateStatusErr)) r.recorder.Eventf(c, corev1.EventTypeWarning, channelUpdateStatusFailed, "Failed to update Channel's status: %v", err) return reconcile.Result{}, updateStatusErr @@ -117,19 +130,7 @@ func (r *reconciler) reconcile(ctx context.Context, c *eventingv1alpha1.Channel) c.Status.InitializeConditions() - // We are syncing three things: - // 1. The K8s Service to talk to this Channel. - // 3. The configuration of all Channel subscriptions. - - if c.DeletionTimestamp != nil { - // K8s garbage collection will delete the K8s service for this channel. - // We use a finalizer to ensure the channel config has been synced. - util.RemoveFinalizer(c, finalizerName) - return nil - } - - util.AddFinalizer(c, finalizerName) - + // We are syncing K8s Service to talk to this Channel. 
svc, err := util.CreateK8sService(ctx, r.client, c, util.ExternalService(c)) if err != nil { logger.Info("Error creating the Channel's K8s Service", zap.Error(err)) diff --git a/pkg/provisioners/inmemory/channel/reconcile_test.go b/pkg/provisioners/inmemory/channel/reconcile_test.go index 76aa9e06f95..2978419df42 100644 --- a/pkg/provisioners/inmemory/channel/reconcile_test.go +++ b/pkg/provisioners/inmemory/channel/reconcile_test.go @@ -265,7 +265,7 @@ func TestReconcile(t *testing.T) { MockLists: errorListingK8sService(), }, WantPresent: []runtime.Object{ - makeChannelWithFinalizer(), + makeChannel(), }, WantErrMsg: testErrorMessage, WantEvent: []corev1.Event{ @@ -282,41 +282,14 @@ func TestReconcile(t *testing.T) { }, WantPresent: []runtime.Object{ // TODO: This should have a useful error message saying that the K8s Service failed. - makeChannelWithFinalizer(), - }, - WantErrMsg: testErrorMessage, - WantEvent: []corev1.Event{ - events[k8sServiceCreateFailed], - }, - }, - { - Name: "Channel get for update fails", - InitialState: []runtime.Object{ makeChannel(), - makeK8sService(), - }, - Mocks: controllertesting.Mocks{ - MockGets: errorOnSecondChannelGet(), }, WantErrMsg: testErrorMessage, WantEvent: []corev1.Event{ - events[channelReconciled], events[channelUpdateStatusFailed], + events[k8sServiceCreateFailed], }, }, { - Name: "Channel update fails", - InitialState: []runtime.Object{ - makeChannel(), - makeK8sService(), - }, - Mocks: controllertesting.Mocks{ - MockUpdates: errorUpdatingChannel(), - }, - WantErrMsg: testErrorMessage, - WantEvent: []corev1.Event{ - events[channelReconciled], events[channelUpdateStatusFailed], - }, - }, { Name: "Channel status update fails", InitialState: []runtime.Object{ makeChannel(), @@ -405,19 +378,6 @@ func getProvisionerName(pn []string) string { return provisionerName } -func makeChannelWithFinalizerAndAddress() *eventingv1alpha1.Channel { - c := makeChannelWithFinalizer() - c.Status.SetAddress(serviceAddress) - return c -} - -func makeReadyChannel() *eventingv1alpha1.Channel { - // Ready channels have the finalizer and are Addressable. - c := makeChannelWithFinalizerAndAddress() - c.Status.MarkProvisioned() - return c -} - func makeChannelNilProvisioner() *eventingv1alpha1.Channel { c := makeChannel() c.Spec.Provisioner = nil diff --git a/pkg/provisioners/inmemory/controller/main.go b/pkg/provisioners/inmemory/controller/main.go index 99ee64f885e..fbbdd262dff 100644 --- a/pkg/provisioners/inmemory/controller/main.go +++ b/pkg/provisioners/inmemory/controller/main.go @@ -29,9 +29,8 @@ import ( "go.uber.org/zap" "sigs.k8s.io/controller-runtime/pkg/client/config" "sigs.k8s.io/controller-runtime/pkg/manager" - // uncomment this line to debug in GKE from local machine - _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + //_ "k8s.io/client-go/plugin/pkg/client/auth/gcp" ) func main() { diff --git a/pkg/provisioners/provisioner_util.go b/pkg/provisioners/provisioner_util.go index a65551fcc3f..0a3653df75f 100644 --- a/pkg/provisioners/provisioner_util.go +++ b/pkg/provisioners/provisioner_util.go @@ -80,7 +80,8 @@ func newDispatcherService(ccp *eventingv1alpha1.ClusterChannelProvisioner, opts { // There is a bug in Istio where named port doesn't work when connecting using an ExternalName service // Refer to https://github.com/istio/istio/issues/13193 for more details. 
- // TODO: Revert this when ISTIO fixes the issue + // TODO: Uncomment Name:"http" when ISTIO fixes the issue + // Name: "http", Port: 80, Protocol: corev1.ProtocolTCP, TargetPort: intstr.FromInt(8080), @@ -91,7 +92,7 @@ func newDispatcherService(ccp *eventingv1alpha1.ClusterChannelProvisioner, opts for _, opt := range opts { if err := opt(svc); err != nil { - return svc, err + return nil, err } } return svc, nil diff --git a/pkg/reconciler/v1alpha1/broker/resources/ingress.go b/pkg/reconciler/v1alpha1/broker/resources/ingress.go index 8df5a57f841..3bde11755e9 100644 --- a/pkg/reconciler/v1alpha1/broker/resources/ingress.go +++ b/pkg/reconciler/v1alpha1/broker/resources/ingress.go @@ -59,6 +59,7 @@ func MakeIngress(args *IngressArgs) *appsv1.Deployment { ObjectMeta: metav1.ObjectMeta{ Labels: ingressLabels(args.Broker), // TODO: Remove this annotation once all channels stop using istio virtual service + // https://github.com/knative/eventing/issues/294 Annotations: map[string]string{ "sidecar.istio.io/inject": "true", }, From c1b85816e8c8f2572a760bed102a4388110e7c63 Mon Sep 17 00:00:00 2001 From: Akash Verenkar Date: Mon, 15 Apr 2019 13:13:15 -0700 Subject: [PATCH 12/37] WIP --- cmd/fanoutsidecar/main.go | 5 - contrib/kafka/cmd/dispatcher/main.go | 33 +--- pkg/channelwatcher/channel_watcher.go | 44 ++++- pkg/channelwatcher/channel_watcher_test.go | 179 ++++++++++++++++++ pkg/reconciler/testing/table.go | 6 +- pkg/sidecar/multichannelfanout/config.go | 10 - pkg/sidecar/multichannelfanout/config_test.go | 141 ++++++++++++++ 7 files changed, 366 insertions(+), 52 deletions(-) create mode 100644 pkg/channelwatcher/channel_watcher_test.go create mode 100644 pkg/sidecar/multichannelfanout/config_test.go diff --git a/cmd/fanoutsidecar/main.go b/cmd/fanoutsidecar/main.go index 618105b53e0..d560806e8cf 100644 --- a/cmd/fanoutsidecar/main.go +++ b/cmd/fanoutsidecar/main.go @@ -131,14 +131,9 @@ func setupChannelWatcher(logger *zap.Logger, configUpdated swappable.UpdateConfi logger.Error("Error creating new maanger.", zap.Error(err)) return nil, err } -<<<<<<< HEAD - if err := v1alpha1.AddToScheme(mgr.GetScheme()); err != nil { - logger.Error("Error adding eventinging scheme to manager.", zap.Error(err)) -======= if err = v1alpha1.AddToScheme(mgr.GetScheme()); err != nil { logger.Error("Error while adding eventing scheme to manager.", zap.Error(err)) return nil, err ->>>>>>> feb5e64feb4e7a837eb7a553a14d91b6d47517d7 } channelwatcher.New(mgr, logger, updateChannelConfig(configUpdated)) diff --git a/contrib/kafka/cmd/dispatcher/main.go b/contrib/kafka/cmd/dispatcher/main.go index 9fe7cfabd52..7f15663181c 100644 --- a/contrib/kafka/cmd/dispatcher/main.go +++ b/contrib/kafka/cmd/dispatcher/main.go @@ -17,7 +17,6 @@ limitations under the License. 
package main import ( - "context" "flag" "log" @@ -26,13 +25,8 @@ import ( "github.com/knative/eventing/contrib/kafka/pkg/dispatcher" "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" "github.com/knative/eventing/pkg/channelwatcher" - "github.com/knative/eventing/pkg/logging" - "github.com/knative/eventing/pkg/sidecar/multichannelfanout" - "github.com/knative/eventing/pkg/sidecar/swappable" "github.com/knative/pkg/signals" "go.uber.org/zap" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/config" "sigs.k8s.io/controller-runtime/pkg/manager" ) @@ -62,7 +56,7 @@ func main() { } v1alpha1.AddToScheme(mgr.GetScheme()) - channelwatcher.New(mgr, logger, updateChannelConfig(kafkaDispatcher.UpdateConfig)) + channelwatcher.New(mgr, logger, channelwatcher.UpdateChannelConfigWatchHandler(kafkaDispatcher.UpdateConfig, shouldWatch)) if err != nil { logger.Fatal("Unable to create channel watcher.", zap.Error(err)) } @@ -75,31 +69,6 @@ func main() { } logger.Info("Exiting...") } -func updateChannelConfig(updateConfig swappable.UpdateConfig) channelwatcher.WatchHandlerFunc { - return func(ctx context.Context, c client.Client, chanNamespacedName types.NamespacedName) error { - channels, err := listAllChannels(ctx, c) - if err != nil { - logging.FromContext(ctx).Info("Unable to list channels", zap.Error(err)) - return err - } - config := multichannelfanout.NewConfigFromChannels(channels) - return updateConfig(config) - } -} - -func listAllChannels(ctx context.Context, c client.Client) ([]v1alpha1.Channel, error) { - channels := make([]v1alpha1.Channel, 0) - cl := &v1alpha1.ChannelList{} - if err := c.List(ctx, &client.ListOptions{}, cl); err != nil { - return nil, err - } - for _, c := range cl.Items { - if c.Status.IsReady() && shouldWatch(&c) { - channels = append(channels, c) - } - } - return channels, nil -} func shouldWatch(ch *v1alpha1.Channel) bool { return ch.Spec.Provisioner != nil && diff --git a/pkg/channelwatcher/channel_watcher.go b/pkg/channelwatcher/channel_watcher.go index b9a77670ce8..8c687d01690 100644 --- a/pkg/channelwatcher/channel_watcher.go +++ b/pkg/channelwatcher/channel_watcher.go @@ -9,6 +9,8 @@ import ( "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" "github.com/knative/eventing/pkg/logging" + "github.com/knative/eventing/pkg/sidecar/multichannelfanout" + "github.com/knative/eventing/pkg/sidecar/swappable" "go.uber.org/zap" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -16,8 +18,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/source" ) -type WatchHandlerFunc func(context.Context, client.Client, types.NamespacedName) error - type reconciler struct { client client.Client logger *zap.Logger @@ -34,6 +34,7 @@ func (r *reconciler) Reconcile(req reconcile.Request) (reconcile.Result, error) return reconcile.Result{}, nil } +// New creates a new instance of Channel Watcher that watches channels and calls the watchHandler on add, update, delete and generic event func New(mgr manager.Manager, logger *zap.Logger, watchHandler WatchHandlerFunc) error { c, err := controller.New("ChannelWatcher", mgr, controller.Options{ Reconciler: &reconciler{ @@ -57,3 +58,42 @@ func New(mgr manager.Manager, logger *zap.Logger, watchHandler WatchHandlerFunc) } return nil } + +// WatchHandlerFunc is called whenever an add, update, delete or generic event is triggers on a channel watch +type WatchHandlerFunc func(context.Context, client.Client, types.NamespacedName) error 
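// A minimal WatchHandlerFunc sketch, using only the imports already present in this file;
// the function name is hypothetical and not part of the patch:
func logOnlyWatchHandler(ctx context.Context, _ client.Client, name types.NamespacedName) error {
	logging.FromContext(ctx).Info("Channel changed", zap.Any("channel", name))
	return nil
}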
+ +// ShouldWatchFunc is called while returning list of channels. +// Channels are included in the list if the return value is true. +type ShouldWatchFunc func(ch *v1alpha1.Channel) bool + +// UpdateChannelConfigWatchHandler is a special handler that +// 1. Lists the channels for which shouldWatch returns true +// 2. Creates a multi-channel-fanout-config +// 3. Calls the updateConfig func with the new multi-channel-fanout-config +// This is used by dispatchers or receivers to update their configs by watching channels +func UpdateChannelConfigWatchHandler(updateConfig swappable.UpdateConfig, shouldWatch ShouldWatchFunc) WatchHandlerFunc { + return func(ctx context.Context, c client.Client, chanNamespacedName types.NamespacedName) error { + channels, err := listAllChannels(ctx, c, shouldWatch) + if err != nil { + logging.FromContext(ctx).Info("Unable to list channels", zap.Error(err)) + return err + } + config := multichannelfanout.NewConfigFromChannels(channels) + return updateConfig(config) + } +} + +// ListAllChannels queries client and gets list of all channels for which shouldWatch returns true. +func listAllChannels(ctx context.Context, c client.Client, shouldWatch ShouldWatchFunc) ([]v1alpha1.Channel, error) { + channels := make([]v1alpha1.Channel, 0) + cl := &v1alpha1.ChannelList{} + if err := c.List(ctx, &client.ListOptions{}, cl); err != nil { + return nil, err + } + for _, c := range cl.Items { + if c.Status.IsReady() && shouldWatch(&c) { + channels = append(channels, c) + } + } + return channels, nil +} diff --git a/pkg/channelwatcher/channel_watcher_test.go b/pkg/channelwatcher/channel_watcher_test.go new file mode 100644 index 00000000000..b3d5395b7ba --- /dev/null +++ b/pkg/channelwatcher/channel_watcher_test.go @@ -0,0 +1,179 @@ +package channelwatcher + +import ( + "context" + "fmt" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" + controllertesting "github.com/knative/eventing/pkg/reconciler/testing" + "github.com/knative/eventing/pkg/sidecar/fanout" + "github.com/knative/eventing/pkg/sidecar/multichannelfanout" + "github.com/knative/eventing/pkg/sidecar/swappable" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes/scheme" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + eventingduck "github.com/knative/eventing/pkg/apis/duck/v1alpha1" + corev1 "k8s.io/api/core/v1" +) + +func init() { + // Add types to scheme + _ = v1alpha1.AddToScheme(scheme.Scheme) +} + +func TestUpdateChannelConfigWatchHandler(t *testing.T) { + tests := []struct { + name string + channels []runtime.Object + clientListError error + updateConfigError error + expectedConfig *multichannelfanout.Config + }{ + { + name: "Client list error", + clientListError: fmt.Errorf("Client list error"), + }, + { + name: "update config error", + updateConfigError: fmt.Errorf("error updating config"), + expectedConfig: &multichannelfanout.Config{ + ChannelConfigs: []multichannelfanout.ChannelConfig{}, + }, + }, + { + name: "Successfully update config", + channels: []runtime.Object{ + makechannel("chan-1", "ns-1", "e.f.g.h", makeSubscribable(makeSubscriber("sub1"), makeSubscriber("sub2"))), + makechannel("chan-2", "ns-2", "i.j.k.l", makeSubscribable(makeSubscriber("sub3"), makeSubscriber("sub4"))), + makechannel("chan-3", "donotwatch", "i.j.k.l", makeSubscribable(makeSubscriber("sub3"), makeSubscriber("sub4"))), + }, + expectedConfig: &multichannelfanout.Config{ + 
ChannelConfigs: []multichannelfanout.ChannelConfig{ + { + Name: "chan-1", + Namespace: "ns-1", + HostName: "e.f.g.h", + FanoutConfig: fanout.Config{ + Subscriptions: []eventingduck.ChannelSubscriberSpec{ + makeSubscriber("sub1"), + makeSubscriber("sub2"), + }, + }, + }, { + Name: "chan-2", + Namespace: "ns-2", + HostName: "i.j.k.l", + FanoutConfig: fanout.Config{ + Subscriptions: []eventingduck.ChannelSubscriberSpec{ + makeSubscriber("sub3"), + makeSubscriber("sub4"), + }, + }, + }, + }, + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + actualConfig := ConfigHolder{} + watchHandler := UpdateChannelConfigWatchHandler(updateConfigWrapper(&actualConfig, test.updateConfigError), shouldWatch) + mockClient := getClient(test.channels, getClientMocks(test.clientListError)) + + actualError := watchHandler(context.TODO(), mockClient, types.NamespacedName{}) + if actualError != nil { + if test.clientListError != nil { + if diff := cmp.Diff(test.clientListError.Error(), actualError.Error()); diff != "" { + t.Fatalf("Unexpected difference (-want +got): %v", diff) + } + } + if test.updateConfigError != nil { + if diff := cmp.Diff(test.updateConfigError.Error(), actualError.Error()); diff != "" { + t.Fatalf("Unexpected difference (-want +got): %v", diff) + } + } + } else { + if test.clientListError != nil { + t.Fatalf("Want error %v \n Got nil", test.clientListError) + } + if test.updateConfigError != nil { + t.Fatalf("Want error %v \n Got nil", test.updateConfigError) + } + } + if diff := cmp.Diff(test.expectedConfig, actualConfig.config); diff != "" { + t.Fatalf("Unexpected difference (-want +got): %v", diff) + } + }) + } +} + +type ConfigHolder struct { + config *multichannelfanout.Config +} + +func shouldWatch(c *v1alpha1.Channel) bool { + if c.Namespace == "donotwatch" { + return false + } + return true +} +func updateConfigWrapper(ch *ConfigHolder, returnError error) swappable.UpdateConfig { + return func(c *multichannelfanout.Config) error { + ch.config = c + return returnError + } +} + +func getClient(objs []runtime.Object, mocks controllertesting.Mocks) *controllertesting.MockClient { + innerClient := fake.NewFakeClient(objs...) 
+ return controllertesting.NewMockClient(innerClient, mocks) +} + +func getClientMocks(listError error) controllertesting.Mocks { + if listError != nil { + return controllertesting.Mocks{ + MockLists: []controllertesting.MockList{ + func(_ client.Client, _ context.Context, _ *client.ListOptions, _ runtime.Object) (controllertesting.MockHandled, error) { + return controllertesting.Handled, listError + }, + }, + } + } + return controllertesting.Mocks{} +} + +func makechannel(name string, namespace string, hostname string, subscribable *eventingduck.Subscribable) *v1alpha1.Channel { + c := v1alpha1.Channel{ + Spec: v1alpha1.ChannelSpec{ + Subscribable: subscribable, + }, + } + c.Name = name + c.Namespace = namespace + c.Status.InitializeConditions() + c.Status.MarkProvisioned() + c.Status.MarkProvisionerInstalled() + c.Status.SetAddress(hostname) + return &c +} +func makeSubscribable(subsriberSpec ...eventingduck.ChannelSubscriberSpec) *eventingduck.Subscribable { + return &eventingduck.Subscribable{ + Subscribers: subsriberSpec, + } +} + +func makeSubscriber(name string) eventingduck.ChannelSubscriberSpec { + return eventingduck.ChannelSubscriberSpec{ + Ref: &corev1.ObjectReference{ + Name: name, + Namespace: name + "-ns", + }, + SubscriberURI: name + "-suburi", + ReplyURI: name + "-replyuri", + } +} diff --git a/pkg/reconciler/testing/table.go b/pkg/reconciler/testing/table.go index 9bf2e2334e6..d26d57a4cf1 100644 --- a/pkg/reconciler/testing/table.go +++ b/pkg/reconciler/testing/table.go @@ -149,7 +149,7 @@ func (tc *TestCase) GetDynamicClient() dynamic.Interface { // GetClient returns the mockClient to use for this test case. func (tc *TestCase) GetClient() *MockClient { - builtObjects := buildAllObjects(tc.InitialState) + builtObjects := BuildAllObjects(tc.InitialState) innerClient := fake.NewFakeClient(builtObjects...) return NewMockClient(innerClient, tc.Mocks) } @@ -224,7 +224,7 @@ func (se stateErrors) Error() string { // to be present after reconciliation. func (tc *TestCase) VerifyWantPresent(c client.Client) error { var errs stateErrors - builtObjects := buildAllObjects(tc.WantPresent) + builtObjects := BuildAllObjects(tc.WantPresent) for _, wp := range builtObjects { o, err := scheme.Scheme.New(wp.GetObjectKind().GroupVersionKind()) if err != nil { @@ -304,7 +304,7 @@ func getEventsAsString(events []corev1.Event) []string { return eventsAsString } -func buildAllObjects(objs []runtime.Object) []runtime.Object { +func BuildAllObjects(objs []runtime.Object) []runtime.Object { builtObjs := []runtime.Object{} for _, obj := range objs { if builder, ok := obj.(Buildable); ok { diff --git a/pkg/sidecar/multichannelfanout/config.go b/pkg/sidecar/multichannelfanout/config.go index 84f29b85089..1c3ca420def 100644 --- a/pkg/sidecar/multichannelfanout/config.go +++ b/pkg/sidecar/multichannelfanout/config.go @@ -14,16 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package multichannelfanout provides an http.Handler that takes in one request to a Knative -// Channel and fans it out to N other requests. Logically, it represents multiple Knative Channels. -// It is made up of a map, map[channel]fanout.Handler and each incoming request is inspected to -// determine which Channel it is on. This Handler delegates the HTTP handling to the fanout.Handler -// corresponding to the incoming request's Channel. -// It is often used in conjunction with a swappable.Handler. 
The swappable.Handler delegates all its -// requests to the multichannelfanout.Handler. When a new configuration is available, a new -// multichannelfanout.Handler is created and swapped in for all subsequent requests. The old -// multichannelfanout.Handler is discarded. - package multichannelfanout import ( diff --git a/pkg/sidecar/multichannelfanout/config_test.go b/pkg/sidecar/multichannelfanout/config_test.go new file mode 100644 index 00000000000..a6d3d5ed782 --- /dev/null +++ b/pkg/sidecar/multichannelfanout/config_test.go @@ -0,0 +1,141 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package multichannelfanout provides an http.Handler that takes in one request to a Knative +// Channel and fans it out to N other requests. Logically, it represents multiple Knative Channels. +// It is made up of a map, map[channel]fanout.Handler and each incoming request is inspected to +// determine which Channel it is on. This Handler delegates the HTTP handling to the fanout.Handler +// corresponding to the incoming request's Channel. +// It is often used in conjunction with a swappable.Handler. The swappable.Handler delegates all its +// requests to the multichannelfanout.Handler. When a new configuration is available, a new +// multichannelfanout.Handler is created and swapped in for all subsequent requests. The old +// multichannelfanout.Handler is discarded. 
+ +package multichannelfanout + +import ( + "testing" + + "github.com/knative/eventing/pkg/sidecar/fanout" + + "github.com/google/go-cmp/cmp" + eventingduck "github.com/knative/eventing/pkg/apis/duck/v1alpha1" + "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" + duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1" + corev1 "k8s.io/api/core/v1" +) + +func TestNewConfigFromChannels(t *testing.T) { + tests := []struct { + name string + channels []v1alpha1.Channel + expected *Config + }{ + { + name: "empty channels list", + channels: []v1alpha1.Channel{}, + expected: &Config{ + ChannelConfigs: []ChannelConfig{}, + }, + }, { + name: "one channel with no subscribers", + channels: []v1alpha1.Channel{ + makechannel("chan-1", "ns-1", "a.b.c.d", nil), + }, + expected: &Config{ + ChannelConfigs: []ChannelConfig{ + { + Name: "chan-1", + Namespace: "ns-1", + HostName: "a.b.c.d", + }, + }, + }, + }, { + name: "multiple channels with subscribers", + channels: []v1alpha1.Channel{ + makechannel("chan-1", "ns-1", "e.f.g.h", makeSubscribable(makeSubscriber("sub1"), makeSubscriber("sub2"))), + makechannel("chan-2", "ns-2", "i.j.k.l", makeSubscribable(makeSubscriber("sub3"), makeSubscriber("sub4"))), + }, + expected: &Config{ + ChannelConfigs: []ChannelConfig{ + { + Name: "chan-1", + Namespace: "ns-1", + HostName: "e.f.g.h", + FanoutConfig: fanout.Config{ + Subscriptions: []eventingduck.ChannelSubscriberSpec{ + makeSubscriber("sub1"), + makeSubscriber("sub2"), + }, + }, + }, { + Name: "chan-2", + Namespace: "ns-2", + HostName: "i.j.k.l", + FanoutConfig: fanout.Config{ + Subscriptions: []eventingduck.ChannelSubscriberSpec{ + makeSubscriber("sub3"), + makeSubscriber("sub4"), + }, + }, + }, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + actual := NewConfigFromChannels(test.channels) + if diff := cmp.Diff(test.expected, actual); diff != "" { + t.Fatalf("Unexpected difference (-want +got): %v", diff) + } + }) + } +} + +func makechannel(name string, namespace string, hostname string, subscribable *eventingduck.Subscribable) v1alpha1.Channel { + c := v1alpha1.Channel{ + Spec: v1alpha1.ChannelSpec{ + Subscribable: subscribable, + }, + Status: v1alpha1.ChannelStatus{ + Address: duckv1alpha1.Addressable{ + Hostname: hostname, + }, + }, + } + c.Name = name + c.Namespace = namespace + return c +} +func makeSubscribable(subsriberSpec ...eventingduck.ChannelSubscriberSpec) *eventingduck.Subscribable { + return &eventingduck.Subscribable{ + Subscribers: subsriberSpec, + } +} + +func makeSubscriber(name string) eventingduck.ChannelSubscriberSpec { + return eventingduck.ChannelSubscriberSpec{ + Ref: &corev1.ObjectReference{ + Name: name, + Namespace: name + "-ns", + }, + SubscriberURI: name + "-suburi", + ReplyURI: name + "-replyuri", + } +} From d2c831f7985e315a981ac41f31c48c5aa8717c3f Mon Sep 17 00:00:00 2001 From: Akash Verenkar Date: Mon, 15 Apr 2019 14:11:12 -0700 Subject: [PATCH 13/37] Updates based on PR comments --- cmd/broker/ingress/main.go | 3 +- cmd/controller/main.go | 16 +++++----- cmd/fanoutsidecar/main.go | 16 ++++++++-- contrib/kafka/cmd/controller/main.go | 12 ++++---- contrib/kafka/main.go | 12 ++++---- pkg/provisioners/channel_util.go | 29 +++++++++++++------ pkg/provisioners/channel_util_test.go | 29 +++++++++++++++++++ .../inmemory/channel/reconcile.go | 16 ++++------ .../inmemory/channel/reconcile_test.go | 21 ++++---------- pkg/provisioners/inmemory/controller/main.go | 2 +- 10 files changed, 96 insertions(+), 60 deletions(-) diff --git 
a/cmd/broker/ingress/main.go b/cmd/broker/ingress/main.go index ea0094fba73..4e1ec764f55 100644 --- a/cmd/broker/ingress/main.go +++ b/cmd/broker/ingress/main.go @@ -42,10 +42,11 @@ import ( "go.opencensus.io/stats/view" "go.opencensus.io/tag" "go.uber.org/zap" - _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" "sigs.k8s.io/controller-runtime/pkg/client/config" "sigs.k8s.io/controller-runtime/pkg/manager" crlog "sigs.k8s.io/controller-runtime/pkg/runtime/log" + // Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters). + //_ "k8s.io/client-go/plugin/pkg/client/auth/gcp" ) var ( diff --git a/cmd/controller/main.go b/cmd/controller/main.go index a508d7092a9..36d1653a0da 100644 --- a/cmd/controller/main.go +++ b/cmd/controller/main.go @@ -24,20 +24,13 @@ import ( "os" "time" + eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" + "github.com/knative/eventing/pkg/logconfig" "github.com/knative/eventing/pkg/reconciler/v1alpha1/broker" "github.com/knative/eventing/pkg/reconciler/v1alpha1/channel" "github.com/knative/eventing/pkg/reconciler/v1alpha1/namespace" "github.com/knative/eventing/pkg/reconciler/v1alpha1/subscription" "github.com/knative/eventing/pkg/reconciler/v1alpha1/trigger" - "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/manager" - - // Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters). - _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" - - eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" - "github.com/knative/eventing/pkg/logconfig" istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3" "github.com/knative/pkg/configmap" "github.com/knative/pkg/logging" @@ -46,9 +39,14 @@ import ( "github.com/knative/pkg/system" "github.com/prometheus/client_golang/prometheus/promhttp" "go.uber.org/zap" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/kubernetes" controllerruntime "sigs.k8s.io/controller-runtime/pkg/client/config" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/manager" logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" + // Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters). + //_ "k8s.io/client-go/plugin/pkg/client/auth/gcp" ) const ( diff --git a/cmd/fanoutsidecar/main.go b/cmd/fanoutsidecar/main.go index 54fa882d9a6..5e2fd40c6c6 100644 --- a/cmd/fanoutsidecar/main.go +++ b/cmd/fanoutsidecar/main.go @@ -40,9 +40,10 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/config" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/runtime/signals" - // uncomment this line to debug in GKE from local machine + // Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters). //_ "k8s.io/client-go/plugin/pkg/client/auth/gcp" ) +) var ( readTimeout = 1 * time.Minute @@ -156,7 +157,12 @@ func updateChannelConfig(updateConfig swappable.UpdateConfig) channelwatcher.Wat func listAllChannels(ctx context.Context, c client.Client) ([]v1alpha1.Channel, error) { channels := make([]v1alpha1.Channel, 0) cl := &v1alpha1.ChannelList{} - if err := c.List(ctx, &client.ListOptions{}, cl); err != nil { + opts := &client.ListOptions{ + // Set Raw because if we need to get more than one page, then we will put the continue token + // into opts.Raw.Continue. 
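// A hedged sketch of the paginated listing this hunk appears to be building toward: the loop
// shape and the helper name are assumptions, while client.List, ListOptions.Raw and
// ChannelList.Continue are the same APIs used in the surrounding diff.
func listAllChannelsPaged(ctx context.Context, c client.Client) ([]v1alpha1.Channel, error) {
	channels := make([]v1alpha1.Channel, 0)
	opts := &client.ListOptions{Raw: &metav1.ListOptions{}}
	for {
		cl := &v1alpha1.ChannelList{}
		if err := c.List(ctx, opts, cl); err != nil {
			return nil, err
		}
		for _, ch := range cl.Items {
			// Keep only ready channels that this sidecar should dispatch for.
			if ch.Status.IsReady() && shouldWatch(&ch) {
				channels = append(channels, ch)
			}
		}
		if cl.Continue == "" {
			return channels, nil
		}
		// Carry the continue token into the next page request.
		opts.Raw.Continue = cl.Continue
	}
}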
+ Raw: &metav1.ListOptions{}, + } + if err := c.List(ctx, opts, cl); err != nil { return nil, err } for _, c := range cl.Items { @@ -164,7 +170,11 @@ func listAllChannels(ctx context.Context, c client.Client) ([]v1alpha1.Channel, channels = append(channels, c) } } - return channels, nil + if cl.Continue != "" { + opts.Raw.Continue = cl.Continue + } else { + return channels, nil + } } func shouldWatch(ch *v1alpha1.Channel) bool { diff --git a/contrib/kafka/cmd/controller/main.go b/contrib/kafka/cmd/controller/main.go index 37e45a43349..5f57b0165de 100644 --- a/contrib/kafka/cmd/controller/main.go +++ b/contrib/kafka/cmd/controller/main.go @@ -4,20 +4,20 @@ import ( "flag" "os" + provisionerController "github.com/knative/eventing/contrib/kafka/pkg/controller" + "github.com/knative/eventing/contrib/kafka/pkg/controller/channel" + eventingv1alpha "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" + "github.com/knative/eventing/pkg/provisioners" istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3" "go.uber.org/zap" "k8s.io/apimachinery/pkg/runtime" - _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" "sigs.k8s.io/controller-runtime/pkg/client/config" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/manager" logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" "sigs.k8s.io/controller-runtime/pkg/runtime/signals" - - provisionerController "github.com/knative/eventing/contrib/kafka/pkg/controller" - "github.com/knative/eventing/contrib/kafka/pkg/controller/channel" - eventingv1alpha "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" - "github.com/knative/eventing/pkg/provisioners" + // Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters). + //_ "k8s.io/client-go/plugin/pkg/client/auth/gcp" ) // SchemeFunc adds types to a Scheme. diff --git a/contrib/kafka/main.go b/contrib/kafka/main.go index 316f2dbd521..ec0a9067282 100644 --- a/contrib/kafka/main.go +++ b/contrib/kafka/main.go @@ -4,20 +4,20 @@ import ( "flag" "os" + provisionerController "github.com/knative/eventing/contrib/kafka/pkg/controller" + "github.com/knative/eventing/contrib/kafka/pkg/controller/channel" + eventingv1alpha "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" + "github.com/knative/eventing/pkg/provisioners" istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3" "go.uber.org/zap" "k8s.io/apimachinery/pkg/runtime" - _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" "sigs.k8s.io/controller-runtime/pkg/client/config" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/manager" logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" "sigs.k8s.io/controller-runtime/pkg/runtime/signals" - - provisionerController "github.com/knative/eventing/contrib/kafka/pkg/controller" - "github.com/knative/eventing/contrib/kafka/pkg/controller/channel" - eventingv1alpha "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" - "github.com/knative/eventing/pkg/provisioners" + // Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters). + //_ "k8s.io/client-go/plugin/pkg/client/auth/gcp" ) const ( diff --git a/pkg/provisioners/channel_util.go b/pkg/provisioners/channel_util.go index f0705452d44..497dae5adc8 100644 --- a/pkg/provisioners/channel_util.go +++ b/pkg/provisioners/channel_util.go @@ -41,9 +41,14 @@ const ( // AddFinalizerResult is used indicate whether a finalizer was added or already present. 
type AddFinalizerResult bool +// RemoveFinalizerResult is used to indicate whether a finalizer was found and removed (FinalizerRemoved), or finalizer not found (FinalizerNotFound). +type RemoveFinalizerResult bool + const ( - FinalizerAlreadyPresent AddFinalizerResult = false - FinalizerAdded AddFinalizerResult = true + FinalizerAlreadyPresent AddFinalizerResult = false + FinalizerAdded AddFinalizerResult = true + FinalizerRemoved RemoveFinalizerResult = true + FinalizerNotFound RemoveFinalizerResult = false ) // AddFinalizer adds finalizerName to the Object. @@ -57,16 +62,22 @@ func AddFinalizer(o metav1.Object, finalizerName string) AddFinalizerResult { return FinalizerAdded } -func RemoveFinalizer(o metav1.Object, finalizerName string) { +func RemoveFinalizer(o metav1.Object, finalizerName string) RemoveFinalizerResult { + result := FinalizerNotFound finalizers := sets.NewString(o.GetFinalizers()...) - finalizers.Delete(finalizerName) - o.SetFinalizers(finalizers.List()) + if finalizers.Has(finalizerName) { + result = FinalizerRemoved + finalizers.Delete(finalizerName) + o.SetFinalizers(finalizers.List()) + } + return result } -type k8sServiceOption func(*corev1.Service) error +// K8sServiceOption is a functional option that can modify the K8s Service in CreateK8sService +type K8sServiceOption func(*corev1.Service) error // ExternalService is a functional option for CreateK8sService to create a K8s service of type ExternalName. -func ExternalService(c *eventingv1alpha1.Channel) k8sServiceOption { +func ExternalService(c *eventingv1alpha1.Channel) K8sServiceOption { return func(svc *corev1.Service) error { svc.Spec = corev1.ServiceSpec{ Type: corev1.ServiceTypeExternalName, @@ -76,7 +87,7 @@ func ExternalService(c *eventingv1alpha1.Channel) k8sServiceOption { } } -func CreateK8sService(ctx context.Context, client runtimeClient.Client, c *eventingv1alpha1.Channel, opts ...k8sServiceOption) (*corev1.Service, error) { +func CreateK8sService(ctx context.Context, client runtimeClient.Client, c *eventingv1alpha1.Channel, opts ...K8sServiceOption) (*corev1.Service, error) { getSvc := func() (*corev1.Service, error) { return getK8sService(ctx, client, c) } @@ -269,7 +280,7 @@ func UpdateChannel(ctx context.Context, client runtimeClient.Client, u *eventing // newK8sService creates a new Service for a Channel resource. It also sets the appropriate // OwnerReferences on the resource so handleObject can discover the Channel resource that 'owns' it. // As well as being garbage collected when the Channel is deleted. 
-func newK8sService(c *eventingv1alpha1.Channel, opts ...k8sServiceOption) (*corev1.Service, error) { +func newK8sService(c *eventingv1alpha1.Channel, opts ...K8sServiceOption) (*corev1.Service, error) { // Add annotations svc := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ diff --git a/pkg/provisioners/channel_util_test.go b/pkg/provisioners/channel_util_test.go index 4f89ffb7b56..848dda5d8f3 100644 --- a/pkg/provisioners/channel_util_test.go +++ b/pkg/provisioners/channel_util_test.go @@ -404,6 +404,35 @@ func TestAddFinalizer(t *testing.T) { } } +func TestRemoveFinalizer(t *testing.T) { + testCases := map[string]struct { + expected RemoveFinalizerResult + }{ + "Finalizer not found": { + expected: false, + }, + "Finalizer removed successfully": { + expected: true, + }, + } + finalizer := "test-finalizer" + for n, tc := range testCases { + t.Run(n, func(t *testing.T) { + c := getNewChannel() + if tc.expected { + c.Finalizers = []string{finalizer} + } else { + c.Finalizers = []string{} + } + actual := RemoveFinalizer(c, finalizer) + + if diff := cmp.Diff(actual, tc.expected); diff != "" { + t.Errorf("unexpected error (-want, +got) = %v", diff) + } + }) + } +} + func TestChannelNames(t *testing.T) { testCases := []struct { Name string diff --git a/pkg/provisioners/inmemory/channel/reconcile.go b/pkg/provisioners/inmemory/channel/reconcile.go index 630f22b4d6b..5d5f8392a82 100644 --- a/pkg/provisioners/inmemory/channel/reconcile.go +++ b/pkg/provisioners/inmemory/channel/reconcile.go @@ -85,17 +85,13 @@ func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, err } logger.Info("Reconciling Channel") - // Finalizer needs to be removed (even though no finalizers are added) main back compat - // with v0.5 in which a finalzier was added. Or else channels will not get deleted after upgrading to 0.6 - // TODO: Remove this entire if block in v0.7+ - if c.DeletionTimestamp != nil { - // K8s garbage collection will delete the K8s service and VirtualService for this channel. - // We use a finalizer to ensure the channel config has been synced. - util.RemoveFinalizer(c, finalizerName) + // Finalizer needs to be removed (even though no finalizers are added) to maintain backwards compatibility + // with v0.5 in which a finalzier was added. Or else channels will not get deleted after upgrading to 0.6+ + if result := util.RemoveFinalizer(c, finalizerName); result == util.FinalizerRemoved { r.client.Update(ctx, c) - logger.Info("Channel reconciled") - r.recorder.Eventf(c, corev1.EventTypeNormal, channelReconciled, "Channel reconciled: %q", c.Name) - return reconcile.Result{}, nil + logger.Info("Channel reconciled. Finalizer Removed") + r.recorder.Eventf(c, corev1.EventTypeNormal, channelReconciled, "Channel reconciled: %q. 
Finalizer removed.", c.Name) + return reconcile.Result{Requeue: true}, nil } err = r.reconcile(ctx, c) diff --git a/pkg/provisioners/inmemory/channel/reconcile_test.go b/pkg/provisioners/inmemory/channel/reconcile_test.go index 2978419df42..0b221854a41 100644 --- a/pkg/provisioners/inmemory/channel/reconcile_test.go +++ b/pkg/provisioners/inmemory/channel/reconcile_test.go @@ -22,6 +22,8 @@ import ( "fmt" "testing" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + eventingduck "github.com/knative/eventing/pkg/apis/duck/v1alpha1" eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" util "github.com/knative/eventing/pkg/provisioners" @@ -245,16 +247,17 @@ func TestReconcile(t *testing.T) { }, }, { - Name: "Channel deleted - finalizer removed", + Name: "Channel has finalizer (to test back compat with version <= 0.5, when finalizers were added", InitialState: []runtime.Object{ - makeDeletingChannel(), + makeChannelWithFinalizer(), }, WantPresent: []runtime.Object{ - makeDeletingChannelWithoutFinalizer(), + makeChannel(), }, WantEvent: []corev1.Event{ events[channelReconciled], }, + WantResult: reconcile.Result{Requeue: true}, }, { Name: "K8s service get fails", @@ -402,18 +405,6 @@ func makeChannelWithFinalizer() *eventingv1alpha1.Channel { return c } -func makeDeletingChannel() *eventingv1alpha1.Channel { - c := makeChannelWithFinalizer() - c.DeletionTimestamp = &deletionTime - return c -} - -func makeDeletingChannelWithoutFinalizer() *eventingv1alpha1.Channel { - c := makeDeletingChannel() - c.Finalizers = nil - return c -} - func makeK8sService(pn ...string) *corev1.Service { return &corev1.Service{ TypeMeta: metav1.TypeMeta{ diff --git a/pkg/provisioners/inmemory/controller/main.go b/pkg/provisioners/inmemory/controller/main.go index fbbdd262dff..577bdc948ce 100644 --- a/pkg/provisioners/inmemory/controller/main.go +++ b/pkg/provisioners/inmemory/controller/main.go @@ -29,7 +29,7 @@ import ( "go.uber.org/zap" "sigs.k8s.io/controller-runtime/pkg/client/config" "sigs.k8s.io/controller-runtime/pkg/manager" - // uncomment this line to debug in GKE from local machine + // Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters). //_ "k8s.io/client-go/plugin/pkg/client/auth/gcp" ) From 16a6ffc29389e54f1f80c7ff89b9a23fb32bd6d6 Mon Sep 17 00:00:00 2001 From: Akash Verenkar Date: Mon, 15 Apr 2019 14:13:34 -0700 Subject: [PATCH 14/37] Updates based on PR comments --- cmd/broker/ingress/main.go | 2 +- cmd/controller/main.go | 2 +- cmd/fanoutsidecar/main.go | 3 +-- contrib/kafka/cmd/controller/main.go | 2 +- contrib/kafka/main.go | 2 +- pkg/provisioners/inmemory/controller/main.go | 2 +- 6 files changed, 6 insertions(+), 7 deletions(-) diff --git a/cmd/broker/ingress/main.go b/cmd/broker/ingress/main.go index 4e1ec764f55..5317d44320c 100644 --- a/cmd/broker/ingress/main.go +++ b/cmd/broker/ingress/main.go @@ -46,7 +46,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/manager" crlog "sigs.k8s.io/controller-runtime/pkg/runtime/log" // Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters). 
- //_ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + // _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" ) var ( diff --git a/cmd/controller/main.go b/cmd/controller/main.go index 36d1653a0da..82851fc1bce 100644 --- a/cmd/controller/main.go +++ b/cmd/controller/main.go @@ -46,7 +46,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/manager" logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" // Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters). - //_ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + // _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" ) const ( diff --git a/cmd/fanoutsidecar/main.go b/cmd/fanoutsidecar/main.go index 5e2fd40c6c6..24d793e07eb 100644 --- a/cmd/fanoutsidecar/main.go +++ b/cmd/fanoutsidecar/main.go @@ -41,8 +41,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/runtime/signals" // Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters). - //_ "k8s.io/client-go/plugin/pkg/client/auth/gcp" -) + // _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" ) var ( diff --git a/contrib/kafka/cmd/controller/main.go b/contrib/kafka/cmd/controller/main.go index 5f57b0165de..375361f4af3 100644 --- a/contrib/kafka/cmd/controller/main.go +++ b/contrib/kafka/cmd/controller/main.go @@ -17,7 +17,7 @@ import ( logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" "sigs.k8s.io/controller-runtime/pkg/runtime/signals" // Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters). - //_ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + // _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" ) // SchemeFunc adds types to a Scheme. diff --git a/contrib/kafka/main.go b/contrib/kafka/main.go index ec0a9067282..ed98481c20b 100644 --- a/contrib/kafka/main.go +++ b/contrib/kafka/main.go @@ -17,7 +17,7 @@ import ( logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" "sigs.k8s.io/controller-runtime/pkg/runtime/signals" // Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters). - //_ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + // _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" ) const ( diff --git a/pkg/provisioners/inmemory/controller/main.go b/pkg/provisioners/inmemory/controller/main.go index 577bdc948ce..2b09c992b4f 100644 --- a/pkg/provisioners/inmemory/controller/main.go +++ b/pkg/provisioners/inmemory/controller/main.go @@ -30,7 +30,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/config" "sigs.k8s.io/controller-runtime/pkg/manager" // Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters). 
- //_ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + // _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" ) func main() { From 67611dc715f72e199330b2c76c5a4d10a7ed920f Mon Sep 17 00:00:00 2001 From: Akash Verenkar Date: Mon, 15 Apr 2019 14:25:56 -0700 Subject: [PATCH 15/37] Fixed UTs --- cmd/fanoutsidecar/main.go | 37 ++++++++++++++++++++----------------- 1 file changed, 20 insertions(+), 17 deletions(-) diff --git a/cmd/fanoutsidecar/main.go b/cmd/fanoutsidecar/main.go index 24d793e07eb..370289ffb5e 100644 --- a/cmd/fanoutsidecar/main.go +++ b/cmd/fanoutsidecar/main.go @@ -35,6 +35,7 @@ import ( "github.com/knative/eventing/pkg/sidecar/swappable" "go.uber.org/zap" "go.uber.org/zap/zapcore" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/config" @@ -155,24 +156,26 @@ func updateChannelConfig(updateConfig swappable.UpdateConfig) channelwatcher.Wat func listAllChannels(ctx context.Context, c client.Client) ([]v1alpha1.Channel, error) { channels := make([]v1alpha1.Channel, 0) - cl := &v1alpha1.ChannelList{} - opts := &client.ListOptions{ - // Set Raw because if we need to get more than one page, then we will put the continue token - // into opts.Raw.Continue. - Raw: &metav1.ListOptions{}, - } - if err := c.List(ctx, opts, cl); err != nil { - return nil, err - } - for _, c := range cl.Items { - if c.Status.IsReady() && shouldWatch(&c) { - channels = append(channels, c) + for { + cl := &v1alpha1.ChannelList{} + opts := &client.ListOptions{ + // Set Raw because if we need to get more than one page, then we will put the continue token + // into opts.Raw.Continue. + Raw: &metav1.ListOptions{}, + } + if err := c.List(ctx, opts, cl); err != nil { + return nil, err + } + for _, c := range cl.Items { + if c.Status.IsReady() && shouldWatch(&c) { + channels = append(channels, c) + } + } + if cl.Continue != "" { + opts.Raw.Continue = cl.Continue + } else { + return channels, nil } - } - if cl.Continue != "" { - opts.Raw.Continue = cl.Continue - } else { - return channels, nil } } From 2cc8525d17a627f2fde061564420cc1320b6e7cc Mon Sep 17 00:00:00 2001 From: Akash Verenkar Date: Mon, 15 Apr 2019 14:50:51 -0700 Subject: [PATCH 16/37] Updated VENDOR_LICENSE --- third_party/VENDOR-LICENSE | 34 ---------------------------------- 1 file changed, 34 deletions(-) diff --git a/third_party/VENDOR-LICENSE b/third_party/VENDOR-LICENSE index e8d7037e247..697031fbfb7 100644 --- a/third_party/VENDOR-LICENSE +++ b/third_party/VENDOR-LICENSE @@ -627,40 +627,6 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -=========================================================== -Import: github.com/knative/eventing/vendor/github.com/fsnotify/fsnotify - -Copyright (c) 2012 The Go Authors. All rights reserved. -Copyright (c) 2012 fsnotify Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. 
nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - - =========================================================== Import: github.com/knative/eventing/vendor/github.com/ghodss/yaml From 3b3f16f5b332c4a43adfa532ffb9a1ff7ad51d9b Mon Sep 17 00:00:00 2001 From: Akash Verenkar Date: Mon, 15 Apr 2019 15:46:44 -0700 Subject: [PATCH 17/37] WIP. Update fanout sidecar --- cmd/fanoutsidecar/main.go | 44 +------------------ pkg/channelwatcher/channel_watcher.go | 35 ++++++++++----- pkg/provisioners/channel_util.go | 8 ++-- .../inmemory/channel/reconcile.go | 1 - 4 files changed, 29 insertions(+), 59 deletions(-) diff --git a/cmd/fanoutsidecar/main.go b/cmd/fanoutsidecar/main.go index 5ca9120c09d..93c1d161320 100644 --- a/cmd/fanoutsidecar/main.go +++ b/cmd/fanoutsidecar/main.go @@ -29,14 +29,9 @@ import ( "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" "github.com/knative/eventing/pkg/channelwatcher" - "github.com/knative/eventing/pkg/logging" - "github.com/knative/eventing/pkg/sidecar/multichannelfanout" "github.com/knative/eventing/pkg/sidecar/swappable" "go.uber.org/zap" "go.uber.org/zap/zapcore" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/config" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/runtime/signals" @@ -136,48 +131,11 @@ func setupChannelWatcher(logger *zap.Logger, configUpdated swappable.UpdateConfi logger.Error("Error while adding eventing scheme to manager.", zap.Error(err)) return nil, err } - channelwatcher.New(mgr, logger, updateChannelConfig(configUpdated)) + channelwatcher.New(mgr, logger, channelwatcher.UpdateChannelConfigWatchHandler(configUpdated, shouldWatch)) return mgr, nil } -func updateChannelConfig(updateConfig swappable.UpdateConfig) channelwatcher.WatchHandlerFunc { - return func(ctx context.Context, c client.Client, chanNamespacedName types.NamespacedName) error { - channels, err := listAllChannels(ctx, c) - if err != nil { - logging.FromContext(ctx).Info("Unable to list channels", zap.Error(err)) - return err - } - config := multichannelfanout.NewConfigFromChannels(channels) - return updateConfig(config) - } -} - -func listAllChannels(ctx context.Context, c client.Client) ([]v1alpha1.Channel, error) { - channels := make([]v1alpha1.Channel, 0) - for { - cl := &v1alpha1.ChannelList{} - opts := &client.ListOptions{ - // Set Raw because if we need to get more than one page, then we will put the continue token - // into opts.Raw.Continue. 
- Raw: &metav1.ListOptions{}, - } - if err := c.List(ctx, opts, cl); err != nil { - return nil, err - } - for _, c := range cl.Items { - if c.Status.IsReady() && shouldWatch(&c) { - channels = append(channels, c) - } - } - if cl.Continue != "" { - opts.Raw.Continue = cl.Continue - } else { - return channels, nil - } - } -} - func shouldWatch(ch *v1alpha1.Channel) bool { if ch.Spec.Provisioner != nil && ch.Spec.Provisioner.Namespace == "" { for _, v := range channelProvisioners { diff --git a/pkg/channelwatcher/channel_watcher.go b/pkg/channelwatcher/channel_watcher.go index 8c687d01690..e81557543a4 100644 --- a/pkg/channelwatcher/channel_watcher.go +++ b/pkg/channelwatcher/channel_watcher.go @@ -3,18 +3,18 @@ package channelwatcher import ( "context" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" "github.com/knative/eventing/pkg/logging" "github.com/knative/eventing/pkg/sidecar/multichannelfanout" "github.com/knative/eventing/pkg/sidecar/swappable" "go.uber.org/zap" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" ) @@ -86,14 +86,25 @@ func UpdateChannelConfigWatchHandler(updateConfig swappable.UpdateConfig, should // ListAllChannels queries client and gets list of all channels for which shouldWatch returns true. func listAllChannels(ctx context.Context, c client.Client, shouldWatch ShouldWatchFunc) ([]v1alpha1.Channel, error) { channels := make([]v1alpha1.Channel, 0) - cl := &v1alpha1.ChannelList{} - if err := c.List(ctx, &client.ListOptions{}, cl); err != nil { - return nil, err - } - for _, c := range cl.Items { - if c.Status.IsReady() && shouldWatch(&c) { - channels = append(channels, c) + for { + cl := &v1alpha1.ChannelList{} + opts := &client.ListOptions{ + // Set Raw because if we need to get more than one page, then we will put the continue token + // into opts.Raw.Continue. + Raw: &metav1.ListOptions{}, + } + if err := c.List(ctx, opts, cl); err != nil { + return nil, err + } + for _, c := range cl.Items { + if c.Status.IsReady() && shouldWatch(&c) { + channels = append(channels, c) + } + } + if cl.Continue != "" { + opts.Raw.Continue = cl.Continue + } else { + return channels, nil } } - return channels, nil } diff --git a/pkg/provisioners/channel_util.go b/pkg/provisioners/channel_util.go index 497dae5adc8..a252465d37e 100644 --- a/pkg/provisioners/channel_util.go +++ b/pkg/provisioners/channel_util.go @@ -62,15 +62,17 @@ func AddFinalizer(o metav1.Object, finalizerName string) AddFinalizerResult { return FinalizerAdded } +// RemoveFinalizer removes the finalizer(finalizerName) from the object(o) if the finalizer is present. +// Returns: - FinalizerRemoved, if the finalizer was found and removed. +// - FinalizerNotFound, if the finalizer was not found. func RemoveFinalizer(o metav1.Object, finalizerName string) RemoveFinalizerResult { - result := FinalizerNotFound finalizers := sets.NewString(o.GetFinalizers()...) 
if finalizers.Has(finalizerName) { - result = FinalizerRemoved finalizers.Delete(finalizerName) o.SetFinalizers(finalizers.List()) + return FinalizerRemoved } - return result + return FinalizerNotFound } // K8sServiceOption is a functional option that can modify the K8s Service in CreateK8sService diff --git a/pkg/provisioners/inmemory/channel/reconcile.go b/pkg/provisioners/inmemory/channel/reconcile.go index 8d1368b6bf1..5d5f8392a82 100644 --- a/pkg/provisioners/inmemory/channel/reconcile.go +++ b/pkg/provisioners/inmemory/channel/reconcile.go @@ -109,7 +109,6 @@ func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, err r.recorder.Eventf(c, corev1.EventTypeWarning, channelUpdateStatusFailed, "Failed to update Channel's status: %v", err) return reconcile.Result{}, updateStatusErr } - return reconcile.Result{}, err } From f065d2292ca4cd3a691afc3431ee9cbc69ce6408 Mon Sep 17 00:00:00 2001 From: Akash Verenkar Date: Mon, 15 Apr 2019 15:54:34 -0700 Subject: [PATCH 18/37] Merge from upstream master --- pkg/channelwatcher/channel_watcher.go | 26 +------------------------- 1 file changed, 1 insertion(+), 25 deletions(-) diff --git a/pkg/channelwatcher/channel_watcher.go b/pkg/channelwatcher/channel_watcher.go index fdbcb539432..98f53314391 100644 --- a/pkg/channelwatcher/channel_watcher.go +++ b/pkg/channelwatcher/channel_watcher.go @@ -3,7 +3,6 @@ package channelwatcher import ( "context" -<<<<<<< HEAD "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" "github.com/knative/eventing/pkg/logging" "github.com/knative/eventing/pkg/sidecar/multichannelfanout" @@ -19,23 +18,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/source" ) -======= - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - - "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" - "github.com/knative/eventing/pkg/logging" - "go.uber.org/zap" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/source" -) - -type WatchHandlerFunc func(context.Context, client.Client, types.NamespacedName) error - ->>>>>>> f8317dd0ead16253fcea1a61a749bb228708f117 type reconciler struct { client client.Client logger *zap.Logger @@ -52,10 +34,7 @@ func (r *reconciler) Reconcile(req reconcile.Request) (reconcile.Result, error) return reconcile.Result{}, nil } -<<<<<<< HEAD // New creates a new instance of Channel Watcher that watches channels and calls the watchHandler on add, update, delete and generic event -======= ->>>>>>> f8317dd0ead16253fcea1a61a749bb228708f117 func New(mgr manager.Manager, logger *zap.Logger, watchHandler WatchHandlerFunc) error { c, err := controller.New("ChannelWatcher", mgr, controller.Options{ Reconciler: &reconciler{ @@ -79,7 +58,6 @@ func New(mgr manager.Manager, logger *zap.Logger, watchHandler WatchHandlerFunc) } return nil } -<<<<<<< HEAD // WatchHandlerFunc is called whenever an add, update, delete or generic event is triggers on a channel watch type WatchHandlerFunc func(context.Context, client.Client, types.NamespacedName) error @@ -105,7 +83,7 @@ func UpdateChannelConfigWatchHandler(updateConfig swappable.UpdateConfig, should } } -// ListAllChannels queries client and gets list of all channels for which shouldWatch returns true. +// listAllChannels queries client and gets list of all channels for which shouldWatch returns true. 
func listAllChannels(ctx context.Context, c client.Client, shouldWatch ShouldWatchFunc) ([]v1alpha1.Channel, error) { channels := make([]v1alpha1.Channel, 0) for { @@ -130,5 +108,3 @@ func listAllChannels(ctx context.Context, c client.Client, shouldWatch ShouldWat } } } -======= ->>>>>>> f8317dd0ead16253fcea1a61a749bb228708f117 From a645dcea4231ead306485126d8f2afe147dd787b Mon Sep 17 00:00:00 2001 From: Akash Verenkar Date: Mon, 15 Apr 2019 17:31:19 -0700 Subject: [PATCH 19/37] UTs pass, ITs passed. COde ready for PR --- cmd/fanoutsidecar/main.go | 2 +- contrib/kafka/cmd/dispatcher/main.go | 2 +- contrib/kafka/config/kafka.yaml | 2 +- contrib/kafka/pkg/dispatcher/dispatcher.go | 10 ++++--- .../kafka/pkg/dispatcher/dispatcher_test.go | 2 +- pkg/channelwatcher/channel_watcher.go | 6 ++-- pkg/channelwatcher/channel_watcher_test.go | 4 +-- pkg/provisioners/channel_util.go | 2 +- pkg/provisioners/message_receiver.go | 30 +++++++++---------- pkg/reconciler/testing/table.go | 6 ++-- 10 files changed, 33 insertions(+), 33 deletions(-) diff --git a/cmd/fanoutsidecar/main.go b/cmd/fanoutsidecar/main.go index 93c1d161320..fc1927d0f01 100644 --- a/cmd/fanoutsidecar/main.go +++ b/cmd/fanoutsidecar/main.go @@ -131,7 +131,7 @@ func setupChannelWatcher(logger *zap.Logger, configUpdated swappable.UpdateConfi logger.Error("Error while adding eventing scheme to manager.", zap.Error(err)) return nil, err } - channelwatcher.New(mgr, logger, channelwatcher.UpdateChannelConfigWatchHandler(configUpdated, shouldWatch)) + channelwatcher.New(mgr, logger, channelwatcher.UpdateConfigWatchHandler(configUpdated, shouldWatch)) return mgr, nil } diff --git a/contrib/kafka/cmd/dispatcher/main.go b/contrib/kafka/cmd/dispatcher/main.go index 7f15663181c..1ab05718539 100644 --- a/contrib/kafka/cmd/dispatcher/main.go +++ b/contrib/kafka/cmd/dispatcher/main.go @@ -56,7 +56,7 @@ func main() { } v1alpha1.AddToScheme(mgr.GetScheme()) - channelwatcher.New(mgr, logger, channelwatcher.UpdateChannelConfigWatchHandler(kafkaDispatcher.UpdateConfig, shouldWatch)) + channelwatcher.New(mgr, logger, channelwatcher.UpdateConfigWatchHandler(kafkaDispatcher.UpdateConfig, shouldWatch)) if err != nil { logger.Fatal("Unable to create channel watcher.", zap.Error(err)) } diff --git a/contrib/kafka/config/kafka.yaml b/contrib/kafka/config/kafka.yaml index 82298b56519..c58fb14296b 100644 --- a/contrib/kafka/config/kafka.yaml +++ b/contrib/kafka/config/kafka.yaml @@ -108,7 +108,7 @@ metadata: namespace: knative-eventing data: # Broker URL's for the provisioner. Replace this with the URL's for your kafka cluster. - bootstrap_servers: my-cluster-kafka-bootstrap.kafka:9092 + bootstrap_servers: kafkabroker.kafka:9092 # Consumer mode to dispatch events from different partitions in parallel. # By default(multiplex), partitions are multiplexed with a single go channel. diff --git a/contrib/kafka/pkg/dispatcher/dispatcher.go b/contrib/kafka/pkg/dispatcher/dispatcher.go index bfe4790d062..9c84f6f4d4a 100644 --- a/contrib/kafka/pkg/dispatcher/dispatcher.go +++ b/contrib/kafka/pkg/dispatcher/dispatcher.go @@ -34,6 +34,7 @@ import ( ) type KafkaDispatcher struct { + // TODO: config doesn't have to be atomic as it is read an updated using updateLock. 
config atomic.Value hostToChannelMap atomic.Value updateLock sync.Mutex @@ -42,8 +43,10 @@ type KafkaDispatcher struct { dispatcher *provisioners.MessageDispatcher kafkaAsyncProducer sarama.AsyncProducer - kafkaConsumers map[provisioners.ChannelReference]map[subscription]KafkaConsumer - kafkaCluster KafkaCluster + // TODO: kafkaConsumer map should probably be atomic as it is updated and read on separate go routines with no syncchronization. + // Verify if this is an issue and fix accordignly + kafkaConsumers map[provisioners.ChannelReference]map[subscription]KafkaConsumer + kafkaCluster KafkaCluster logger *zap.Logger } @@ -140,7 +143,6 @@ func (d *KafkaDispatcher) UpdateConfig(config *multichannelfanout.Config) error // Update the config so that it can be used for comparison during next sync d.setConfig(config) - } return nil } @@ -150,7 +152,7 @@ func createHostToChannelMap(config *multichannelfanout.Config) (map[string]provi for _, cConfig := range config.ChannelConfigs { if cr, ok := hcMap[cConfig.HostName]; ok { return nil, fmt.Errorf( - "Duplicate hostName found. HostName:%s, channel:%s.%s, channel:%s.%s", + "Duplicate hostName found. Each channel must have a unique host header. HostName:%s, channel:%s.%s, channel:%s.%s", cConfig.HostName, cConfig.Namespace, cConfig.Name, diff --git a/contrib/kafka/pkg/dispatcher/dispatcher_test.go b/contrib/kafka/pkg/dispatcher/dispatcher_test.go index 9d0a32222fb..9e697e83262 100644 --- a/contrib/kafka/pkg/dispatcher/dispatcher_test.go +++ b/contrib/kafka/pkg/dispatcher/dispatcher_test.go @@ -406,7 +406,7 @@ func TestDispatcher_UpdateConfig(t *testing.T) { }, }, }, - createErr: "Duplicate hostName found. HostName:a.b.c.d, channel:default.test-channel-2, channel:default.test-channel-1", + createErr: "Duplicate hostName found. Each channel must have a unique host header. HostName:a.b.c.d, channel:default.test-channel-2, channel:default.test-channel-1", oldHostToChanMap: map[string]provisioners.ChannelReference{}, }, } diff --git a/pkg/channelwatcher/channel_watcher.go b/pkg/channelwatcher/channel_watcher.go index 98f53314391..8837ffc8747 100644 --- a/pkg/channelwatcher/channel_watcher.go +++ b/pkg/channelwatcher/channel_watcher.go @@ -59,19 +59,19 @@ func New(mgr manager.Manager, logger *zap.Logger, watchHandler WatchHandlerFunc) return nil } -// WatchHandlerFunc is called whenever an add, update, delete or generic event is triggers on a channel watch +// WatchHandlerFunc is called whenever an add, update, delete or generic event is triggered on a channel type WatchHandlerFunc func(context.Context, client.Client, types.NamespacedName) error // ShouldWatchFunc is called while returning list of channels. // Channels are included in the list if the return value is true. type ShouldWatchFunc func(ch *v1alpha1.Channel) bool -// UpdateChannelConfigWatchHandler is a special handler that +// UpdateConfigWatchHandler is a special handler that // 1. Lists the channels for which shouldWatch returns true // 2. Creates a multi-channel-fanout-config // 3. 
Calls the updateConfig func with the new multi-channel-fanout-config // This is used by dispatchers or receivers to update their configs by watching channels -func UpdateChannelConfigWatchHandler(updateConfig swappable.UpdateConfig, shouldWatch ShouldWatchFunc) WatchHandlerFunc { +func UpdateConfigWatchHandler(updateConfig swappable.UpdateConfig, shouldWatch ShouldWatchFunc) WatchHandlerFunc { return func(ctx context.Context, c client.Client, chanNamespacedName types.NamespacedName) error { channels, err := listAllChannels(ctx, c, shouldWatch) if err != nil { diff --git a/pkg/channelwatcher/channel_watcher_test.go b/pkg/channelwatcher/channel_watcher_test.go index b3d5395b7ba..56f9f772873 100644 --- a/pkg/channelwatcher/channel_watcher_test.go +++ b/pkg/channelwatcher/channel_watcher_test.go @@ -26,7 +26,7 @@ func init() { _ = v1alpha1.AddToScheme(scheme.Scheme) } -func TestUpdateChannelConfigWatchHandler(t *testing.T) { +func TestUpdateConfigWatchHandler(t *testing.T) { tests := []struct { name string channels []runtime.Object @@ -82,7 +82,7 @@ func TestUpdateChannelConfigWatchHandler(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { actualConfig := ConfigHolder{} - watchHandler := UpdateChannelConfigWatchHandler(updateConfigWrapper(&actualConfig, test.updateConfigError), shouldWatch) + watchHandler := UpdateConfigWatchHandler(updateConfigWrapper(&actualConfig, test.updateConfigError), shouldWatch) mockClient := getClient(test.channels, getClientMocks(test.clientListError)) actualError := watchHandler(context.TODO(), mockClient, types.NamespacedName{}) diff --git a/pkg/provisioners/channel_util.go b/pkg/provisioners/channel_util.go index a252465d37e..d61205c6897 100644 --- a/pkg/provisioners/channel_util.go +++ b/pkg/provisioners/channel_util.go @@ -64,7 +64,7 @@ func AddFinalizer(o metav1.Object, finalizerName string) AddFinalizerResult { // RemoveFinalizer removes the finalizer(finalizerName) from the object(o) if the finalizer is present. // Returns: - FinalizerRemoved, if the finalizer was found and removed. -// - FinalizerNotFound, if the finalizer was not found. +// - FinalizerNotFound, if the finalizer was not found. func RemoveFinalizer(o metav1.Object, finalizerName string) RemoveFinalizerResult { finalizers := sets.NewString(o.GetFinalizers()...) if finalizers.Has(finalizerName) { diff --git a/pkg/provisioners/message_receiver.go b/pkg/provisioners/message_receiver.go index b4fceb1022c..4c9bac7f93a 100644 --- a/pkg/provisioners/message_receiver.go +++ b/pkg/provisioners/message_receiver.go @@ -32,7 +32,7 @@ const ( MessageReceiverPort = 8080 ) -// Message receiver receives messages. +// MessageReceiver receives messages. type MessageReceiver struct { receiverFunc func(ChannelReference, *Message) error forwardHeaders sets.String @@ -41,14 +41,16 @@ type MessageReceiver struct { hostToChannelFunc ResolveChannelFromHostFunc } -type receiverOptions func(*MessageReceiver) error +// ReceiverOptions provides functional options to MessageReceiver function +type ReceiverOptions func(*MessageReceiver) error +// ResolveChannelFromHostFunc function enables MessageReceiver to get the Channel Reference from incoming request HostHeader +// before calling receiverFunc type ResolveChannelFromHostFunc func(string) (ChannelReference, error) -// ResolveChannelFromHostHeader is a receiverOption that enables the consumer of the MessageReceiver -// to pass a map[]ChannelReference. 
This map will then be used to to get the ChannelReference -// from httpRequest.Host before calling receiverFunc -func ResolveChannelFromHostHeader(hostToChannelFunc ResolveChannelFromHostFunc) receiverOptions { +// ResolveChannelFromHostHeader is a ReceiverOption for NewMessageReceiver which enables the caller to overwrite the +// default behaviour defined by ParseChannel function +func ResolveChannelFromHostHeader(hostToChannelFunc ResolveChannelFromHostFunc) ReceiverOptions { return func(r *MessageReceiver) error { r.hostToChannelFunc = hostToChannelFunc return nil @@ -57,23 +59,19 @@ func ResolveChannelFromHostHeader(hostToChannelFunc ResolveChannelFromHostFunc) // NewMessageReceiver creates a message receiver passing new messages to the // receiverFunc. -func NewMessageReceiver(receiverFunc func(ChannelReference, *Message) error, logger *zap.SugaredLogger, opts ...receiverOptions) (*MessageReceiver, error) { +func NewMessageReceiver(receiverFunc func(ChannelReference, *Message) error, logger *zap.SugaredLogger, opts ...ReceiverOptions) (*MessageReceiver, error) { receiver := &MessageReceiver{ - receiverFunc: receiverFunc, - forwardHeaders: sets.NewString(forwardHeaders...), - forwardPrefixes: forwardPrefixes, - - logger: logger, + receiverFunc: receiverFunc, + forwardHeaders: sets.NewString(forwardHeaders...), + forwardPrefixes: forwardPrefixes, + hostToChannelFunc: ResolveChannelFromHostFunc(ParseChannel), + logger: logger, } for _, opt := range opts { if err := opt(receiver); err != nil { return nil, err } } - // Default to old behaviour of host = channelName.channelNamespace - if receiver.hostToChannelFunc == nil { - receiver.hostToChannelFunc = ResolveChannelFromHostFunc(ParseChannel) - } return receiver, nil } diff --git a/pkg/reconciler/testing/table.go b/pkg/reconciler/testing/table.go index d26d57a4cf1..9bf2e2334e6 100644 --- a/pkg/reconciler/testing/table.go +++ b/pkg/reconciler/testing/table.go @@ -149,7 +149,7 @@ func (tc *TestCase) GetDynamicClient() dynamic.Interface { // GetClient returns the mockClient to use for this test case. func (tc *TestCase) GetClient() *MockClient { - builtObjects := BuildAllObjects(tc.InitialState) + builtObjects := buildAllObjects(tc.InitialState) innerClient := fake.NewFakeClient(builtObjects...) return NewMockClient(innerClient, tc.Mocks) } @@ -224,7 +224,7 @@ func (se stateErrors) Error() string { // to be present after reconciliation. func (tc *TestCase) VerifyWantPresent(c client.Client) error { var errs stateErrors - builtObjects := BuildAllObjects(tc.WantPresent) + builtObjects := buildAllObjects(tc.WantPresent) for _, wp := range builtObjects { o, err := scheme.Scheme.New(wp.GetObjectKind().GroupVersionKind()) if err != nil { @@ -304,7 +304,7 @@ func getEventsAsString(events []corev1.Event) []string { return eventsAsString } -func BuildAllObjects(objs []runtime.Object) []runtime.Object { +func buildAllObjects(objs []runtime.Object) []runtime.Object { builtObjs := []runtime.Object{} for _, obj := range objs { if builder, ok := obj.(Buildable); ok { From fdc4b57c07d570efafc2b4b0508688fceb06cbf9 Mon Sep 17 00:00:00 2001 From: Akash Verenkar Date: Tue, 16 Apr 2019 14:24:43 -0700 Subject: [PATCH 20/37] Update natss to not use ISTIO. UTs and E2E tests pass. 
--- contrib/natss/config/broker/natss.yaml | 2 - .../natss/pkg/controller/channel/reconcile.go | 8 +-- .../pkg/controller/channel/reconcile_test.go | 45 +++++++++++-- .../natss/pkg/dispatcher/channel/reconcile.go | 13 ++++ .../pkg/dispatcher/dispatcher/dispatcher.go | 58 ++++++++++++++++- .../dispatcher/dispatcher/dispatcher_test.go | 64 +++++++++++++++++++ pkg/channelwatcher/channel_watcher.go | 6 +- 7 files changed, 179 insertions(+), 17 deletions(-) diff --git a/contrib/natss/config/broker/natss.yaml b/contrib/natss/config/broker/natss.yaml index df040e07b98..2f1bb433fbd 100644 --- a/contrib/natss/config/broker/natss.yaml +++ b/contrib/natss/config/broker/natss.yaml @@ -62,8 +62,6 @@ spec: app: nats-streaming template: metadata: - annotations: - sidecar.istio.io/inject: "true" labels: *labels spec: containers: diff --git a/contrib/natss/pkg/controller/channel/reconcile.go b/contrib/natss/pkg/controller/channel/reconcile.go index 4eb9445bccd..2429d5eff62 100644 --- a/contrib/natss/pkg/controller/channel/reconcile.go +++ b/contrib/natss/pkg/controller/channel/reconcile.go @@ -122,19 +122,13 @@ func (r *reconciler) reconcile(ctx context.Context, c *eventingv1alpha1.Channel) return nil } - svc, err := provisioners.CreateK8sService(ctx, r.client, c) + svc, err := provisioners.CreateK8sService(ctx, r.client, c, provisioners.ExternalService(c)) if err != nil { r.logger.Info("Error creating the Channel's K8s Service", zap.Error(err)) return err } c.Status.SetAddress(names.ServiceHostName(svc.Name, svc.Namespace)) - _, err = provisioners.CreateVirtualService(ctx, r.client, c, svc) - if err != nil { - r.logger.Info("Error creating the Virtual Service for the Channel", zap.Error(err)) - return err - } - c.Status.MarkProvisioned() return nil } diff --git a/contrib/natss/pkg/controller/channel/reconcile_test.go b/contrib/natss/pkg/controller/channel/reconcile_test.go index 18d0cf5291a..37a9c5e618b 100644 --- a/contrib/natss/pkg/controller/channel/reconcile_test.go +++ b/contrib/natss/pkg/controller/channel/reconcile_test.go @@ -22,11 +22,12 @@ import ( eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" "github.com/knative/eventing/pkg/provisioners" - util "github.com/knative/eventing/pkg/provisioners" + "github.com/knative/eventing/pkg/reconciler/names" controllertesting "github.com/knative/eventing/pkg/reconciler/testing" "github.com/knative/eventing/pkg/utils" duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1" istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3" + "github.com/knative/pkg/system" _ "github.com/knative/pkg/system/testing" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -65,12 +66,15 @@ var testCases = []controllertesting.TestCase{ InitialState: []runtime.Object{ makeNewClusterChannelProvisioner(clusterChannelProvisionerName, true), makeNewChannel(channelName, clusterChannelProvisionerName), - makeVirtualService(), }, ReconcileKey: fmt.Sprintf("%s/%s", testNS, channelName), WantResult: reconcile.Result{}, WantPresent: []runtime.Object{ makeNewChannelProvisionedStatus(channelName, clusterChannelProvisionerName), + makeK8sService(channelName, clusterChannelProvisionerName), + }, + WantAbsent: []runtime.Object{ + makeVirtualService(), }, IgnoreTimes: true, }, @@ -213,7 +217,6 @@ func makeNewClusterChannelProvisioner(name string, isReady bool) *eventingv1alph clusterChannelProvisioner.ObjectMeta.SelfLink = "" return clusterChannelProvisioner } - func makeVirtualService() *istiov1alpha3.VirtualService { return 
&istiov1alpha3.VirtualService{ TypeMeta: metav1.TypeMeta{ @@ -251,7 +254,7 @@ func makeVirtualService() *istiov1alpha3.VirtualService { Destination: istiov1alpha3.Destination{ Host: "kafka-provisioner.knative-eventing.svc." + utils.GetClusterDomainName(), Port: istiov1alpha3.PortSelector{ - Number: util.PortNumber, + Number: provisioners.PortNumber, }, }}, }}, @@ -265,5 +268,39 @@ func om(namespace, name string) metav1.ObjectMeta { Namespace: namespace, Name: name, SelfLink: fmt.Sprintf("/apis/eventing/v1alpha1/namespaces/%s/object/%s", namespace, name), + UID: testUID, + } +} + +func makeK8sService(channelName string, clusterChannelProvisionerName string) *corev1.Service { + return &corev1.Service{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Service", + }, + ObjectMeta: metav1.ObjectMeta{ + GenerateName: fmt.Sprintf("%s-channel-", channelName), + Namespace: testNS, + Labels: map[string]string{ + provisioners.EventingChannelLabel: channelName, + provisioners.OldEventingChannelLabel: channelName, + provisioners.EventingProvisionerLabel: clusterChannelProvisionerName, + provisioners.OldEventingProvisionerLabel: clusterChannelProvisionerName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: eventingv1alpha1.SchemeGroupVersion.String(), + Kind: "Channel", + Name: channelName, + UID: testUID, + Controller: &truePointer, + BlockOwnerDeletion: &truePointer, + }, + }, + }, + Spec: corev1.ServiceSpec{ + ExternalName: names.ServiceHostName(fmt.Sprintf("%s-dispatcher", clusterChannelProvisionerName), system.Namespace()), + Type: "ExternalName", + }, } } diff --git a/contrib/natss/pkg/dispatcher/channel/reconcile.go b/contrib/natss/pkg/dispatcher/channel/reconcile.go index dda10e27f59..f9af9763321 100644 --- a/contrib/natss/pkg/dispatcher/channel/reconcile.go +++ b/contrib/natss/pkg/dispatcher/channel/reconcile.go @@ -29,6 +29,7 @@ import ( ccpcontroller "github.com/knative/eventing/contrib/natss/pkg/controller/clusterchannelprovisioner" eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" + "github.com/knative/eventing/pkg/channelwatcher" ) type reconciler struct { @@ -122,5 +123,17 @@ func (r *reconciler) reconcile(ctx context.Context, c *eventingv1alpha1.Channel) r.logger.Error("UpdateSubscriptions() failed: ", zap.Error(err)) return false, err } + + chanList, err := channelwatcher.ListAllChannels(ctx, r.client, r.shouldReconcile) + if err != nil { + r.logger.Error("Error getting channel list", zap.Error(err)) + return false, err + } + + if err := r.subscriptionsSupervisor.UpdateHostToChannelMap(ctx, chanList); err != nil { + r.logger.Error("Error updating host to channel map", zap.Error(err)) + return false, err + } + return false, nil } diff --git a/contrib/natss/pkg/dispatcher/dispatcher/dispatcher.go b/contrib/natss/pkg/dispatcher/dispatcher/dispatcher.go index e2ca29c959d..4599d73837e 100644 --- a/contrib/natss/pkg/dispatcher/dispatcher/dispatcher.go +++ b/contrib/natss/pkg/dispatcher/dispatcher/dispatcher.go @@ -17,12 +17,15 @@ limitations under the License. 
package dispatcher import ( + "context" "encoding/json" "fmt" "sync" + "sync/atomic" "time" "github.com/knative/eventing/contrib/natss/pkg/stanutil" + "github.com/knative/eventing/pkg/logging" "github.com/knative/eventing/pkg/provisioners" stan "github.com/nats-io/go-nats-streaming" "go.uber.org/zap" @@ -59,6 +62,9 @@ type SubscriptionsSupervisor struct { natssConnMux sync.Mutex natssConn *stan.Conn natssConnInProgress bool + + hostToChannelMapMutex sync.Mutex + hostToChannelMap atomic.Value } // NewDispatcher returns a new SubscriptionsSupervisor. @@ -71,11 +77,15 @@ func NewDispatcher(natssURL, clusterID string, logger *zap.Logger) (*Subscriptio clusterID: clusterID, subscriptions: make(map[provisioners.ChannelReference]map[subscriptionReference]*stan.Subscription), } - receiver, err := provisioners.NewMessageReceiver(createReceiverFunction(d, logger.Sugar()), logger.Sugar()) + receiver, err := provisioners.NewMessageReceiver( + createReceiverFunction(d, logger.Sugar()), + logger.Sugar(), + provisioners.ResolveChannelFromHostHeader(provisioners.ResolveChannelFromHostFunc(d.getChannelReferenceFromHost))) if err != nil { return nil, err } d.receiver = receiver + d.setHostToChannelMap(map[string]provisioners.ChannelReference{}) return d, nil } @@ -291,3 +301,49 @@ func (s *SubscriptionsSupervisor) unsubscribe(channel provisioners.ChannelRefere func getSubject(channel provisioners.ChannelReference) string { return channel.Name + "." + channel.Namespace } + +func (s *SubscriptionsSupervisor) getHostToChannelMap() map[string]provisioners.ChannelReference { + return s.hostToChannelMap.Load().(map[string]provisioners.ChannelReference) +} + +func (s *SubscriptionsSupervisor) setHostToChannelMap(hcMap map[string]provisioners.ChannelReference) { + s.hostToChannelMap.Store(hcMap) +} + +// UpdateHostToChannelMap will be called from the controller that watches natss channels. +// It will update internal hostToChannelMap which is used to resolve the hostHeader of the +// incoming request to the correct ChannelReference in the receiver function. +func (s *SubscriptionsSupervisor) UpdateHostToChannelMap(ctx context.Context, chanList []eventingv1alpha1.Channel) error { + logging.FromContext(ctx).Info("UpdateHostToChannelMap: Acquiring mutex lock") + s.hostToChannelMapMutex.Lock() + defer s.hostToChannelMapMutex.Unlock() + logging.FromContext(ctx).Info("UpdateHostToChannelMap: Acquired mutex lock. Updating internal map") + + hostToChanMap := make(map[string]provisioners.ChannelReference, len(chanList)) + for _, c := range chanList { + hostName := c.Status.Address.Hostname + if cr, ok := hostToChanMap[hostName]; ok { + return fmt.Errorf( + "Duplicate hostName found. Each channel must have a unique host header. HostName:%s, channel:%s.%s, channel:%s.%s", + hostName, + c.Namespace, + c.Name, + cr.Namespace, + cr.Name) + } + hostToChanMap[hostName] = provisioners.ChannelReference{Name: c.Name, Namespace: c.Namespace} + } + + s.setHostToChannelMap(hostToChanMap) + logging.FromContext(ctx).Info("UpdateHostToChannelMap: Update successful. Releasing mutex lock") + return nil +} + +func (s *SubscriptionsSupervisor) getChannelReferenceFromHost(host string) (provisioners.ChannelReference, error) { + chMap := s.getHostToChannelMap() + cr, ok := chMap[host] + if !ok { + return cr, fmt.Errorf("Invalid HostName:%s. 
HostName not found in any of the watched natss channels", host) + } + return cr, nil +} diff --git a/contrib/natss/pkg/dispatcher/dispatcher/dispatcher_test.go b/contrib/natss/pkg/dispatcher/dispatcher/dispatcher_test.go index 3af212f2811..13547dc90b4 100644 --- a/contrib/natss/pkg/dispatcher/dispatcher/dispatcher_test.go +++ b/contrib/natss/pkg/dispatcher/dispatcher/dispatcher_test.go @@ -17,11 +17,13 @@ limitations under the License. package dispatcher import ( + "context" "encoding/json" "os" "testing" "time" + "github.com/google/go-cmp/cmp" "github.com/knative/eventing/contrib/natss/pkg/stanutil" "github.com/knative/eventing/pkg/apis/duck/v1alpha1" eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" @@ -207,6 +209,68 @@ func TestUpdateSubscriptions(t *testing.T) { } } +func TestUpdateHostToChannelMap(t *testing.T) { + tests := []struct { + name string + chanList []eventingv1alpha1.Channel + expected map[string]provisioners.ChannelReference + expectedErrorString string + }{ + { + name: "Empty channel list", + expected: map[string]provisioners.ChannelReference{}, + }, { + name: "Duplicate host name", + chanList: []eventingv1alpha1.Channel{ + *makechannel("chan1", "ns1", "host1"), + *makechannel("chan2", "ns2", "host2"), + *makechannel("chan3", "ns3", "host2"), + }, + expected: map[string]provisioners.ChannelReference{}, + expectedErrorString: "Duplicate hostName found. Each channel must have a unique host header. HostName:host2, channel:ns3.chan3, channel:ns2.chan2", + }, { + name: "Valid list of channels", + chanList: []eventingv1alpha1.Channel{ + *makechannel("chan1", "ns1", "host1"), + *makechannel("chan2", "ns2", "host2"), + *makechannel("chan3", "ns3", "host3"), + }, + expected: map[string]provisioners.ChannelReference{ + "host1": provisioners.ChannelReference{Name: "chan1", Namespace: "ns1"}, + "host2": provisioners.ChannelReference{Name: "chan2", Namespace: "ns2"}, + "host3": provisioners.ChannelReference{Name: "chan3", Namespace: "ns3"}, + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + s.setHostToChannelMap(map[string]provisioners.ChannelReference{}) + err := s.UpdateHostToChannelMap(context.TODO(), test.chanList) + + if err != nil { + if diff := cmp.Diff(test.expectedErrorString, err.Error()); diff != "" { + t.Fatalf("Unexpected difference (-want +got): %v", diff) + } + } + + if diff := cmp.Diff(test.expected, s.getHostToChannelMap()); diff != "" { + t.Fatalf("Unexpected difference (-want +got): %v", diff) + } + }) + } +} + +func makechannel(name string, namespace string, hostname string) *eventingv1alpha1.Channel { + c := eventingv1alpha1.Channel{} + c.Name = name + c.Namespace = namespace + c.Status.InitializeConditions() + c.Status.MarkProvisioned() + c.Status.MarkProvisionerInstalled() + c.Status.SetAddress(hostname) + return &c +} + func startNatss() (*server.StanServer, error) { logger.Infof("Start NATSS") var ( diff --git a/pkg/channelwatcher/channel_watcher.go b/pkg/channelwatcher/channel_watcher.go index 8837ffc8747..90a687b8e67 100644 --- a/pkg/channelwatcher/channel_watcher.go +++ b/pkg/channelwatcher/channel_watcher.go @@ -73,7 +73,7 @@ type ShouldWatchFunc func(ch *v1alpha1.Channel) bool // This is used by dispatchers or receivers to update their configs by watching channels func UpdateConfigWatchHandler(updateConfig swappable.UpdateConfig, shouldWatch ShouldWatchFunc) WatchHandlerFunc { return func(ctx context.Context, c client.Client, chanNamespacedName types.NamespacedName) error { - channels, err := 
listAllChannels(ctx, c, shouldWatch) + channels, err := ListAllChannels(ctx, c, shouldWatch) if err != nil { logging.FromContext(ctx).Info("Unable to list channels", zap.Error(err)) return err @@ -83,8 +83,8 @@ func UpdateConfigWatchHandler(updateConfig swappable.UpdateConfig, shouldWatch S } } -// listAllChannels queries client and gets list of all channels for which shouldWatch returns true. -func listAllChannels(ctx context.Context, c client.Client, shouldWatch ShouldWatchFunc) ([]v1alpha1.Channel, error) { +// ListAllChannels queries client and gets list of all channels for which shouldWatch returns true. +func ListAllChannels(ctx context.Context, c client.Client, shouldWatch ShouldWatchFunc) ([]v1alpha1.Channel, error) { channels := make([]v1alpha1.Channel, 0) for { cl := &v1alpha1.ChannelList{} From 09e4dfa03adc27e7bf516a3bddb5fcd98d2e71e7 Mon Sep 17 00:00:00 2001 From: Akash Verenkar Date: Tue, 16 Apr 2019 14:34:18 -0700 Subject: [PATCH 21/37] Updates based on PR comments --- contrib/kafka/cmd/dispatcher/main.go | 8 +++++--- pkg/channelwatcher/channel_watcher.go | 2 +- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/contrib/kafka/cmd/dispatcher/main.go b/contrib/kafka/cmd/dispatcher/main.go index 1ab05718539..b0d1c2ac286 100644 --- a/contrib/kafka/cmd/dispatcher/main.go +++ b/contrib/kafka/cmd/dispatcher/main.go @@ -55,9 +55,11 @@ func main() { logger.Fatal("Unable to add kafkaDispatcher", zap.Error(err)) } - v1alpha1.AddToScheme(mgr.GetScheme()) - channelwatcher.New(mgr, logger, channelwatcher.UpdateConfigWatchHandler(kafkaDispatcher.UpdateConfig, shouldWatch)) - if err != nil { + if err := v1alpha1.AddToScheme(mgr.GetScheme()); err != nil { + logger.Fatal("Unable to add scheme for eventing apis.", zap.Error(err)) + } + + if err := channelwatcher.New(mgr, logger, channelwatcher.UpdateConfigWatchHandler(kafkaDispatcher.UpdateConfig, shouldWatch)); err != nil { logger.Fatal("Unable to create channel watcher.", zap.Error(err)) } diff --git a/pkg/channelwatcher/channel_watcher.go b/pkg/channelwatcher/channel_watcher.go index 8837ffc8747..ff8a7852e65 100644 --- a/pkg/channelwatcher/channel_watcher.go +++ b/pkg/channelwatcher/channel_watcher.go @@ -72,7 +72,7 @@ type ShouldWatchFunc func(ch *v1alpha1.Channel) bool // 3. 
Calls the updateConfig func with the new multi-channel-fanout-config // This is used by dispatchers or receivers to update their configs by watching channels func UpdateConfigWatchHandler(updateConfig swappable.UpdateConfig, shouldWatch ShouldWatchFunc) WatchHandlerFunc { - return func(ctx context.Context, c client.Client, chanNamespacedName types.NamespacedName) error { + return func(ctx context.Context, c client.Client, _ types.NamespacedName) error { channels, err := listAllChannels(ctx, c, shouldWatch) if err != nil { logging.FromContext(ctx).Info("Unable to list channels", zap.Error(err)) From eb76bcdaa7a1740547f1ffb35f3d4a1eb590fcdd Mon Sep 17 00:00:00 2001 From: Akash Verenkar Date: Tue, 16 Apr 2019 16:23:14 -0700 Subject: [PATCH 22/37] REmoved permission to istio virtual service from controller --- contrib/natss/config/provisioner.yaml | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/contrib/natss/config/provisioner.yaml b/contrib/natss/config/provisioner.yaml index 681146deca6..6790bbe7b1d 100644 --- a/contrib/natss/config/provisioner.yaml +++ b/contrib/natss/config/provisioner.yaml @@ -55,16 +55,6 @@ rules: - watch - create - update - - apiGroups: - - networking.istio.io - resources: - - virtualservices - verbs: - - get - - list - - watch - - create - - update --- From 1de9e3880e01c640218e1942ed426b0df3650d5b Mon Sep 17 00:00:00 2001 From: Akash Verenkar Date: Wed, 17 Apr 2019 16:43:21 -0700 Subject: [PATCH 23/37] WIP --- contrib/gcppubsub/config/gcppubsub.yaml | 19 -- .../pkg/controller/channel/controller.go | 10 - .../pkg/controller/channel/reconcile.go | 57 ++--- .../pkg/controller/channel/reconcile_test.go | 198 +++--------------- contrib/gcppubsub/pkg/dispatcher/cmd/main.go | 14 +- .../pkg/dispatcher/dispatcher/controller.go | 5 +- .../pkg/dispatcher/dispatcher/reconcile.go | 25 ++- .../dispatcher/dispatcher/reconcile_test.go | 51 ++++- .../pkg/dispatcher/receiver/receiver.go | 56 ++++- .../pkg/dispatcher/receiver/receiver_test.go | 149 +++++++++++-- .../pkg/dispatcher/dispatcher/dispatcher.go | 20 +- pkg/channelwatcher/channel_watcher.go | 30 +-- pkg/provisioners/channel_util.go | 19 ++ 13 files changed, 336 insertions(+), 317 deletions(-) diff --git a/contrib/gcppubsub/config/gcppubsub.yaml b/contrib/gcppubsub/config/gcppubsub.yaml index c05493180c8..ef22343e158 100644 --- a/contrib/gcppubsub/config/gcppubsub.yaml +++ b/contrib/gcppubsub/config/gcppubsub.yaml @@ -61,25 +61,6 @@ rules: verbs: - create - update - - apiGroups: - - networking.istio.io - resources: - - virtualservices - verbs: - - get - - list - - watch - - create - - update - - apiGroups: - - "" # Core API Group. 
- resources: - - events - verbs: - - create - - patch - - update - --- apiVersion: rbac.authorization.k8s.io/v1 diff --git a/contrib/gcppubsub/pkg/controller/channel/controller.go b/contrib/gcppubsub/pkg/controller/channel/controller.go index 847419fd60c..3248d2bbaed 100644 --- a/contrib/gcppubsub/pkg/controller/channel/controller.go +++ b/contrib/gcppubsub/pkg/controller/channel/controller.go @@ -19,7 +19,6 @@ package channel import ( pubsubutil "github.com/knative/eventing/contrib/gcppubsub/pkg/util" eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" - istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3" "go.uber.org/zap" corev1 "k8s.io/api/core/v1" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -74,15 +73,6 @@ func ProvideController(defaultGcpProject string, defaultSecret *corev1.ObjectRef return nil, err } - // Watch the VirtualServices that are owned by Channels. - err = c.Watch(&source.Kind{ - Type: &istiov1alpha3.VirtualService{}, - }, &handler.EnqueueRequestForOwner{OwnerType: &eventingv1alpha1.Channel{}, IsController: true}) - if err != nil { - logger.Error("Unable to watch VirtualServices.", zap.Error(err)) - return nil, err - } - return c, nil } } diff --git a/contrib/gcppubsub/pkg/controller/channel/reconcile.go b/contrib/gcppubsub/pkg/controller/channel/reconcile.go index b446f11de3c..61ce3945637 100644 --- a/contrib/gcppubsub/pkg/controller/channel/reconcile.go +++ b/contrib/gcppubsub/pkg/controller/channel/reconcile.go @@ -48,21 +48,20 @@ const ( noNeedToPersist // Name of the corev1.Events emitted from the reconciliation process - channelReconciled = "ChannelReconciled" - channelUpdateStatusFailed = "ChannelUpdateStatusFailed" - channelReadStatusFailed = "ChannelReadStatusFailed" - gcpCredentialsReadFailed = "GcpCredentialsReadFailed" - gcpResourcesPlanFailed = "GcpResourcesPlanFailed" - gcpResourcesPersistFailed = "GcpResourcesPersistFailed" - virtualServiceCreateFailed = "VirtualServiceCreateFailed" - k8sServiceCreateFailed = "K8sServiceCreateFailed" - topicCreateFailed = "TopicCreateFailed" - topicDeleteFailed = "TopicDeleteFailed" - subscriptionSyncFailed = "SubscriptionSyncFailed" - subscriptionDeleteFailed = "SubscriptionDeleteFailed" + channelReconciled = "ChannelReconciled" + channelUpdateStatusFailed = "ChannelUpdateStatusFailed" + channelReadStatusFailed = "ChannelReadStatusFailed" + gcpCredentialsReadFailed = "GcpCredentialsReadFailed" + gcpResourcesPlanFailed = "GcpResourcesPlanFailed" + gcpResourcesPersistFailed = "GcpResourcesPersistFailed" + k8sServiceCreateFailed = "K8sServiceCreateFailed" + topicCreateFailed = "TopicCreateFailed" + topicDeleteFailed = "TopicDeleteFailed" + subscriptionSyncFailed = "SubscriptionSyncFailed" + subscriptionDeleteFailed = "SubscriptionDeleteFailed" ) -// reconciler reconciles GCP-PubSub Channels by creating the K8s Service and Istio VirtualService +// reconciler reconciles GCP-PubSub Channels by creating the K8s Service (ExternalName) // allowing other processes to send data to them. It also creates the GCP PubSub Topics (one per // Channel) and GCP PubSub Subscriptions (one per Subscriber). type reconciler struct { @@ -115,7 +114,7 @@ func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, err } // Does this Controller control this Channel? 
- if !r.shouldReconcile(c) { + if !ShouldReconcile(c) { logging.FromContext(ctx).Info("Not reconciling Channel, it is not controlled by this Controller", zap.Any("ref", c.Spec)) return reconcile.Result{}, nil } @@ -146,9 +145,9 @@ func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, err }, reconcileErr } -// shouldReconcile determines if this Controller should control (and therefore reconcile) a given +// ShouldReconcile determines if this Controller should control (and therefore reconcile) a given // Channel. This Controller only handles gcp-pubsub channels. -func (r *reconciler) shouldReconcile(c *eventingv1alpha1.Channel) bool { +func ShouldReconcile(c *eventingv1alpha1.Channel) bool { if c.Spec.Provisioner != nil { return ccpcontroller.IsControlled(c.Spec.Provisioner) } @@ -163,9 +162,8 @@ func (r *reconciler) reconcile(ctx context.Context, c *eventingv1alpha1.Channel) // We are syncing four things: // 1. The K8s Service to talk to this Channel. - // 2. The Istio VirtualService to talk to this Channel. - // 3. The GCP PubSub Topic (one for the Channel). - // 4. The GCP PubSub Subscriptions (one for each Subscriber of the Channel). + // 2. The GCP PubSub Topic (one for the Channel). + // 3. The GCP PubSub Subscriptions (one for each Subscriber of the Channel). // First we will plan all the names out for steps 3 and 4 persist them to status.internal. Then, on a // subsequent reconcile, we manipulate all the GCP resources in steps 3 and 4. @@ -186,7 +184,7 @@ func (r *reconciler) reconcile(ctx context.Context, c *eventingv1alpha1.Channel) } if c.DeletionTimestamp != nil { - // K8s garbage collection will delete the K8s service and VirtualService for this channel. + // K8s garbage collection will delete the K8s service for this channel. // All the subs should be deleted. 
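// With the Istio VirtualService gone, the only routing object owned by the Channel is its
// K8s Service, which createK8sService (further down) now builds as an ExternalName Service
// pointing at the provisioner's dispatcher. A hedged sketch of the resulting spec, using
// the names that appear in the updated tests of this patch:
//
//	Spec: corev1.ServiceSpec{
//		Type:         corev1.ServiceTypeExternalName,
//		ExternalName: names.ServiceHostName(fmt.Sprintf("%s-dispatcher", ccpName), system.Namespace()),
//	}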
subsToSync := &syncSubs{ subsToDelete: originalPCS.Subscriptions, @@ -231,18 +229,12 @@ func (r *reconciler) reconcile(ctx context.Context, c *eventingv1alpha1.Channel) return true, nil } - svc, err := r.createK8sService(ctx, c) + _, err = r.createK8sService(ctx, c) if err != nil { r.recorder.Eventf(c, v1.EventTypeWarning, k8sServiceCreateFailed, "Failed to reconcile Channel's K8s Service: %v", err) return false, err } - err = r.createVirtualService(ctx, c, svc) - if err != nil { - r.recorder.Eventf(c, v1.EventTypeWarning, virtualServiceCreateFailed, "Failed to reconcile Virtual Service for the Channel: %v", err) - return false, err - } - topic, err := r.createTopic(ctx, plannedPCS, gcpCreds) if err != nil { r.recorder.Eventf(c, v1.EventTypeWarning, topicCreateFailed, "Failed to reconcile Topic for the Channel: %v", err) @@ -356,7 +348,7 @@ func (r *reconciler) planGcpResources(ctx context.Context, c *eventingv1alpha1.C } func (r *reconciler) createK8sService(ctx context.Context, c *eventingv1alpha1.Channel) (*v1.Service, error) { - svc, err := util.CreateK8sService(ctx, r.client, c) + svc, err := util.CreateK8sService(ctx, r.client, c, util.ExternalService(c)) if err != nil { logging.FromContext(ctx).Info("Error creating the Channel's K8s Service", zap.Error(err)) return nil, err @@ -366,15 +358,6 @@ func (r *reconciler) createK8sService(ctx context.Context, c *eventingv1alpha1.C return svc, nil } -func (r *reconciler) createVirtualService(ctx context.Context, c *eventingv1alpha1.Channel, svc *v1.Service) error { - _, err := util.CreateVirtualService(ctx, r.client, c, svc) - if err != nil { - logging.FromContext(ctx).Info("Error creating the Virtual Service for the Channel", zap.Error(err)) - return err - } - return nil -} - func (r *reconciler) createTopic(ctx context.Context, plannedPCS *pubsubutil.GcpPubSubChannelStatus, gcpCreds *google.Credentials) (pubsubutil.PubSubTopic, error) { psc, err := r.pubSubClientCreator(ctx, gcpCreds, plannedPCS.GCPProject) if err != nil { diff --git a/contrib/gcppubsub/pkg/controller/channel/reconcile_test.go b/contrib/gcppubsub/pkg/controller/channel/reconcile_test.go index 771557768a8..fdaf51be4b2 100644 --- a/contrib/gcppubsub/pkg/controller/channel/reconcile_test.go +++ b/contrib/gcppubsub/pkg/controller/channel/reconcile_test.go @@ -26,19 +26,18 @@ import ( pubsubutil "github.com/knative/eventing/contrib/gcppubsub/pkg/util" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - "github.com/knative/eventing/pkg/apis/duck/v1alpha1" "github.com/knative/eventing/contrib/gcppubsub/pkg/util/testcreds" "github.com/knative/eventing/contrib/gcppubsub/pkg/util/fakepubsub" - eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" util "github.com/knative/eventing/pkg/provisioners" + "github.com/knative/eventing/pkg/reconciler/names" controllertesting "github.com/knative/eventing/pkg/reconciler/testing" "github.com/knative/eventing/pkg/utils" istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3" + "github.com/knative/pkg/system" _ "github.com/knative/pkg/system/testing" "go.uber.org/zap" corev1 "k8s.io/api/core/v1" @@ -47,6 +46,7 @@ import ( "k8s.io/client-go/kubernetes/scheme" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/reconcile" ) const ( @@ -90,18 +90,17 @@ var ( // map of events to set test cases' expectations easier events = map[string]corev1.Event{ - channelReconciled: {Reason: channelReconciled, Type: corev1.EventTypeNormal}, - 
channelUpdateStatusFailed: {Reason: channelUpdateStatusFailed, Type: corev1.EventTypeWarning}, - channelReadStatusFailed: {Reason: channelReadStatusFailed, Type: corev1.EventTypeWarning}, - gcpCredentialsReadFailed: {Reason: gcpCredentialsReadFailed, Type: corev1.EventTypeWarning}, - gcpResourcesPlanFailed: {Reason: gcpResourcesPlanFailed, Type: corev1.EventTypeWarning}, - gcpResourcesPersistFailed: {Reason: gcpResourcesPersistFailed, Type: corev1.EventTypeWarning}, - virtualServiceCreateFailed: {Reason: virtualServiceCreateFailed, Type: corev1.EventTypeWarning}, - k8sServiceCreateFailed: {Reason: k8sServiceCreateFailed, Type: corev1.EventTypeWarning}, - topicCreateFailed: {Reason: topicCreateFailed, Type: corev1.EventTypeWarning}, - topicDeleteFailed: {Reason: topicDeleteFailed, Type: corev1.EventTypeWarning}, - subscriptionSyncFailed: {Reason: subscriptionSyncFailed, Type: corev1.EventTypeWarning}, - subscriptionDeleteFailed: {Reason: subscriptionDeleteFailed, Type: corev1.EventTypeWarning}, + channelReconciled: {Reason: channelReconciled, Type: corev1.EventTypeNormal}, + channelUpdateStatusFailed: {Reason: channelUpdateStatusFailed, Type: corev1.EventTypeWarning}, + channelReadStatusFailed: {Reason: channelReadStatusFailed, Type: corev1.EventTypeWarning}, + gcpCredentialsReadFailed: {Reason: gcpCredentialsReadFailed, Type: corev1.EventTypeWarning}, + gcpResourcesPlanFailed: {Reason: gcpResourcesPlanFailed, Type: corev1.EventTypeWarning}, + gcpResourcesPersistFailed: {Reason: gcpResourcesPersistFailed, Type: corev1.EventTypeWarning}, + k8sServiceCreateFailed: {Reason: k8sServiceCreateFailed, Type: corev1.EventTypeWarning}, + topicCreateFailed: {Reason: topicCreateFailed, Type: corev1.EventTypeWarning}, + topicDeleteFailed: {Reason: topicDeleteFailed, Type: corev1.EventTypeWarning}, + subscriptionSyncFailed: {Reason: subscriptionSyncFailed, Type: corev1.EventTypeWarning}, + subscriptionDeleteFailed: {Reason: subscriptionDeleteFailed, Type: corev1.EventTypeWarning}, } ) @@ -480,62 +479,6 @@ func TestReconcile(t *testing.T) { events[k8sServiceCreateFailed], }, }, - { - Name: "Virtual service get fails", - InitialState: []runtime.Object{ - makeChannelWithFinalizerAndPCS(), - makeK8sService(), - makeVirtualService(), - testcreds.MakeSecretWithCreds(), - }, - Mocks: controllertesting.Mocks{ - MockLists: errorListingVirtualService(), - }, - WantPresent: []runtime.Object{ - // TODO: This should have a useful error message saying that the VirtualService - // failed. - makeChannelWithFinalizerAndPCSAndAddress(), - }, - WantErrMsg: testErrorMessage, - WantEvent: []corev1.Event{ - events[virtualServiceCreateFailed], - }, - }, - { - Name: "Virtual service creation fails", - InitialState: []runtime.Object{ - makeChannelWithFinalizerAndPCS(), - makeK8sService(), - testcreds.MakeSecretWithCreds(), - }, - Mocks: controllertesting.Mocks{ - MockCreates: errorCreatingVirtualService(), - }, - WantPresent: []runtime.Object{ - // TODO: This should have a useful error message saying that the VirtualService - // failed. 
- makeChannelWithFinalizerAndPCSAndAddress(), - }, - WantErrMsg: testErrorMessage, - WantEvent: []corev1.Event{ - events[virtualServiceCreateFailed], - }, - }, - { - Name: "VirtualService already exists - not owned by Channel", - InitialState: []runtime.Object{ - makeChannelWithFinalizerAndPCS(), - makeK8sService(), - makeVirtualServiceNotOwnedByChannel(), - testcreds.MakeSecretWithCreds(), - }, - WantPresent: []runtime.Object{ - makeReadyChannel(), - }, - WantEvent: []corev1.Event{ - events[channelReconciled], - }, - }, { Name: "Error planning - subscriber missing UID", InitialState: []runtime.Object{ @@ -571,7 +514,6 @@ func TestReconcile(t *testing.T) { InitialState: []runtime.Object{ makeChannelWithFinalizerAndPCS(), makeK8sService(), - makeVirtualService(), testcreds.MakeSecretWithCreds(), }, OtherTestData: map[string]interface{}{ @@ -592,7 +534,6 @@ func TestReconcile(t *testing.T) { InitialState: []runtime.Object{ makeChannelWithFinalizerAndPCS(), makeK8sService(), - makeVirtualService(), testcreds.MakeSecretWithCreds(), }, OtherTestData: map[string]interface{}{ @@ -616,8 +557,6 @@ func TestReconcile(t *testing.T) { Name: "Create Topic - topic already exists", InitialState: []runtime.Object{ makeChannelWithFinalizerAndPCS(), - makeK8sService(), - makeVirtualService(), testcreds.MakeSecretWithCreds(), }, OtherTestData: map[string]interface{}{ @@ -630,6 +569,7 @@ func TestReconcile(t *testing.T) { }, }, WantPresent: []runtime.Object{ + makeK8sService(), makeReadyChannel(), }, WantEvent: []corev1.Event{ @@ -641,7 +581,6 @@ func TestReconcile(t *testing.T) { InitialState: []runtime.Object{ makeChannelWithFinalizerAndPCS(), makeK8sService(), - makeVirtualService(), testcreds.MakeSecretWithCreds(), }, OtherTestData: map[string]interface{}{ @@ -663,12 +602,11 @@ func TestReconcile(t *testing.T) { Name: "Create Topic - topic create succeeds", InitialState: []runtime.Object{ makeChannelWithFinalizerAndPCS(), - makeK8sService(), - makeVirtualService(), testcreds.MakeSecretWithCreds(), }, WantPresent: []runtime.Object{ makeReadyChannel(), + makeK8sService(), }, WantEvent: []corev1.Event{ events[channelReconciled], @@ -679,7 +617,6 @@ func TestReconcile(t *testing.T) { InitialState: []runtime.Object{ makeChannelWithSubscribersAndFinalizerAndPCS(), makeK8sService(), - makeVirtualService(), testcreds.MakeSecretWithCreds(), }, OtherTestData: map[string]interface{}{ @@ -704,7 +641,6 @@ func TestReconcile(t *testing.T) { InitialState: []runtime.Object{ makeChannelWithSubscribersAndFinalizerAndPCS(), makeK8sService(), - makeVirtualService(), testcreds.MakeSecretWithCreds(), }, OtherTestData: map[string]interface{}{ @@ -728,7 +664,6 @@ func TestReconcile(t *testing.T) { InitialState: []runtime.Object{ makeChannelWithSubscribersAndFinalizerAndPCS(), makeK8sService(), - makeVirtualService(), testcreds.MakeSecretWithCreds(), }, OtherTestData: map[string]interface{}{ @@ -751,7 +686,6 @@ func TestReconcile(t *testing.T) { InitialState: []runtime.Object{ makeChannelWithSubscribersAndFinalizerAndPCS(), makeK8sService(), - makeVirtualService(), testcreds.MakeSecretWithCreds(), }, WantPresent: []runtime.Object{ @@ -766,7 +700,6 @@ func TestReconcile(t *testing.T) { InitialState: []runtime.Object{ makeChannelWithFinalizerAndPCS(), makeK8sService(), - makeVirtualService(), testcreds.MakeSecretWithCreds(), }, Mocks: controllertesting.Mocks{ @@ -782,7 +715,6 @@ func TestReconcile(t *testing.T) { InitialState: []runtime.Object{ makeChannel(), makeK8sService(), - makeVirtualService(), 
testcreds.MakeSecretWithCreds(), }, Mocks: controllertesting.Mocks{ @@ -797,7 +729,6 @@ func TestReconcile(t *testing.T) { InitialState: []runtime.Object{ makeChannelWithFinalizerAndPCS(), makeK8sService(), - makeVirtualService(), testcreds.MakeSecretWithCreds(), }, Mocks: controllertesting.Mocks{ @@ -854,7 +785,9 @@ func makeChannel() *eventingv1alpha1.Channel { func makeChannelWithFinalizerAndPCSAndAddress() *eventingv1alpha1.Channel { c := makeChannelWithFinalizerAndPCS() - c.Status.SetAddress(fmt.Sprintf("%s-channel.%s.svc.%s", c.Name, c.Namespace, utils.GetClusterDomainName())) + // serviceAddress is the address of the K8s Service. It uses a GeneratedName and the fake client + // does not fill in Name, so the name is the empty string. + c.Status.SetAddress(fmt.Sprintf(".%s.svc.%s", c.Namespace, utils.GetClusterDomainName())) return c } @@ -1067,11 +1000,13 @@ func makeK8sService() *corev1.Service { Kind: "Service", }, ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("%s-channel", cName), - Namespace: cNamespace, + GenerateName: fmt.Sprintf("%s-channel-", cName), + Namespace: cNamespace, Labels: map[string]string{ - "channel": cName, - "provisioner": ccpName, + util.EventingChannelLabel: cName, + util.OldEventingChannelLabel: cName, + util.EventingProvisionerLabel: ccpName, + util.OldEventingProvisionerLabel: ccpName, }, OwnerReferences: []metav1.OwnerReference{ { @@ -1085,68 +1020,12 @@ func makeK8sService() *corev1.Service { }, }, Spec: corev1.ServiceSpec{ - Ports: []corev1.ServicePort{ - { - Name: util.PortName, - Port: util.PortNumber, - }, - }, - }, - } -} - -func makeVirtualService() *istiov1alpha3.VirtualService { - return &istiov1alpha3.VirtualService{ - TypeMeta: metav1.TypeMeta{ - APIVersion: istiov1alpha3.SchemeGroupVersion.String(), - Kind: "VirtualService", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("%s-channel", cName), - Namespace: cNamespace, - Labels: map[string]string{ - "channel": cName, - "provisioner": ccpName, - }, - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: eventingv1alpha1.SchemeGroupVersion.String(), - Kind: "Channel", - Name: cName, - UID: cUID, - Controller: &truePointer, - BlockOwnerDeletion: &truePointer, - }, - }, - }, - Spec: istiov1alpha3.VirtualServiceSpec{ - Hosts: []string{ - fmt.Sprintf("%s-channel.%s.svc.%s", cName, cNamespace, utils.GetClusterDomainName()), - fmt.Sprintf("%s.%s.channels.%s", cName, cNamespace, utils.GetClusterDomainName()), - }, - HTTP: []istiov1alpha3.HTTPRoute{{ - Rewrite: &istiov1alpha3.HTTPRewrite{ - Authority: fmt.Sprintf("%s.%s.channels.%s", cName, cNamespace, utils.GetClusterDomainName()), - }, - Route: []istiov1alpha3.DestinationWeight{{ - Destination: istiov1alpha3.Destination{ - Host: "in-memory-channel-clusterbus.knative-eventing.svc." 
+ utils.GetClusterDomainName(), - Port: istiov1alpha3.PortSelector{ - Number: util.PortNumber, - }, - }}, - }}, - }, + ExternalName: names.ServiceHostName(fmt.Sprintf("%s-dispatcher", ccpName), system.Namespace()), + Type: corev1.ServiceTypeExternalName, }, } } -func makeVirtualServiceNotOwnedByChannel() *istiov1alpha3.VirtualService { - vs := makeVirtualService() - vs.OwnerReferences = nil - return vs -} - func errorOnSecondChannelGet() []controllertesting.MockGet { passThrough := []controllertesting.MockGet{ func(innerClient client.Client, ctx context.Context, key client.ObjectKey, obj runtime.Object) (controllertesting.MockHandled, error) { @@ -1177,18 +1056,6 @@ func errorListingK8sService() []controllertesting.MockList { }, } } - -func errorListingVirtualService() []controllertesting.MockList { - return []controllertesting.MockList{ - func(_ client.Client, _ context.Context, _ *client.ListOptions, obj runtime.Object) (controllertesting.MockHandled, error) { - if _, ok := obj.(*istiov1alpha3.VirtualServiceList); ok { - return controllertesting.Handled, errors.New(testErrorMessage) - } - return controllertesting.Unhandled, nil - }, - } -} - func errorCreatingK8sService() []controllertesting.MockCreate { return []controllertesting.MockCreate{ func(_ client.Client, _ context.Context, obj runtime.Object) (controllertesting.MockHandled, error) { @@ -1200,17 +1067,6 @@ func errorCreatingK8sService() []controllertesting.MockCreate { } } -func errorCreatingVirtualService() []controllertesting.MockCreate { - return []controllertesting.MockCreate{ - func(_ client.Client, _ context.Context, obj runtime.Object) (controllertesting.MockHandled, error) { - if _, ok := obj.(*istiov1alpha3.VirtualService); ok { - return controllertesting.Handled, errors.New(testErrorMessage) - } - return controllertesting.Unhandled, nil - }, - } -} - func errorUpdatingChannel() []controllertesting.MockUpdate { return []controllertesting.MockUpdate{ func(_ client.Client, _ context.Context, obj runtime.Object) (controllertesting.MockHandled, error) { diff --git a/contrib/gcppubsub/pkg/dispatcher/cmd/main.go b/contrib/gcppubsub/pkg/dispatcher/cmd/main.go index 078e08bf2d4..830a6da1746 100644 --- a/contrib/gcppubsub/pkg/dispatcher/cmd/main.go +++ b/contrib/gcppubsub/pkg/dispatcher/cmd/main.go @@ -17,8 +17,11 @@ package main import ( + "context" "flag" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "github.com/knative/eventing/contrib/gcppubsub/pkg/controller/clusterchannelprovisioner" "github.com/knative/eventing/contrib/gcppubsub/pkg/dispatcher/dispatcher" "github.com/knative/eventing/contrib/gcppubsub/pkg/dispatcher/receiver" @@ -61,7 +64,7 @@ func main() { // PubSub) and the dispatcher (takes messages in PubSub and sends them in cluster) in this // binary. 
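// The hunk below keeps a handle on the receiver and registers an extra reconcile handler
// with the dispatcher, so that every Channel reconcile also refreshes the receiver's
// host-to-channel map. A condensed sketch of the wiring, error handling elided and names
// as used in this patch:
//
//	recv, runnables, err := receiver.New(logger.Desugar(), mgr.GetClient(), util.GcpPubSubClientCreator)
//	handlers := []dispatcher.ReconcileHandlers{
//		func(ctx context.Context, _ reconcile.Request) error {
//			// Rebuild the hostname -> ChannelReference map from the currently ready Channels.
//			return recv.UpdateHostToChannelMap(ctx)
//		},
//	}
//	_, err = dispatcher.New(mgr, logger.Desugar(), handlers)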
- _, runnables, err := receiver.New(logger.Desugar(), mgr.GetClient(), util.GcpPubSubClientCreator) + receiver, runnables, err := receiver.New(logger.Desugar(), mgr.GetClient(), util.GcpPubSubClientCreator) if err != nil { logger.Fatal("Unable to create new receiver and runnable", zap.Error(err)) } @@ -72,7 +75,14 @@ func main() { } } - if _, err = dispatcher.New(mgr, logger.Desugar()); err != nil { + if _, err = dispatcher.New( + mgr, + logger.Desugar(), + []dispatcher.ReconcileHandlers{ + func(ctx context.Context, _ reconcile.Request) error { + return receiver.UpdateHostToChannelMap(ctx) + }, + }); err != nil { logger.Fatal("Unable to create the dispatcher", zap.Error(err)) } diff --git a/contrib/gcppubsub/pkg/dispatcher/dispatcher/controller.go b/contrib/gcppubsub/pkg/dispatcher/dispatcher/controller.go index 00dd7cdf44a..cdc3c83d934 100644 --- a/contrib/gcppubsub/pkg/dispatcher/dispatcher/controller.go +++ b/contrib/gcppubsub/pkg/dispatcher/dispatcher/controller.go @@ -53,7 +53,7 @@ const ( // New returns a Controller that represents the dispatcher portion (messages from GCP PubSub are // sent into the cluster) of the GCP PubSub dispatcher. We use a reconcile loop to watch all // Channels and notice changes to them. It uses an exponential backoff to throttle the retries. -func New(mgr manager.Manager, logger *zap.Logger) (controller.Controller, error) { +func New(mgr manager.Manager, logger *zap.Logger, additionalHandlers []ReconcileHandlers) (controller.Controller, error) { // reconcileChan is used when the dispatcher itself needs to force reconciliation of a Channel. reconcileChan := make(chan event.GenericEvent) @@ -71,7 +71,8 @@ func New(mgr manager.Manager, logger *zap.Logger) (controller.Controller, error) subscriptionsLock: sync.Mutex{}, subscriptions: map[channelName]map[subscriptionName]context.CancelFunc{}, - rateLimiter: workqueue.NewItemExponentialFailureRateLimiter(expBackoffBaseDelay, expBackoffMaxDelay), + rateLimiter: workqueue.NewItemExponentialFailureRateLimiter(expBackoffBaseDelay, expBackoffMaxDelay), + additionalHandlers: additionalHandlers, } c, err := controller.New(controllerAgentName, mgr, controller.Options{ diff --git a/contrib/gcppubsub/pkg/dispatcher/dispatcher/reconcile.go b/contrib/gcppubsub/pkg/dispatcher/dispatcher/reconcile.go index 0186fbfbbe3..4351dd7670f 100644 --- a/contrib/gcppubsub/pkg/dispatcher/dispatcher/reconcile.go +++ b/contrib/gcppubsub/pkg/dispatcher/dispatcher/reconcile.go @@ -22,11 +22,12 @@ import ( "sync" "time" + "github.com/knative/eventing/contrib/gcppubsub/pkg/controller/channel" + v1 "k8s.io/api/core/v1" "k8s.io/client-go/util/workqueue" - ccpcontroller "github.com/knative/eventing/contrib/gcppubsub/pkg/controller/clusterchannelprovisioner" pubsubutil "github.com/knative/eventing/contrib/gcppubsub/pkg/util" eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" "github.com/knative/eventing/pkg/logging" @@ -54,6 +55,9 @@ type channelName = types.NamespacedName type subscriptionName = types.NamespacedName type empty struct{} +// ReconcileHandlers will be run by in addition to exiting reconcile +type ReconcileHandlers func(context.Context, reconcile.Request) error + // reconciler reconciles Channels with the gcp-pubsub provisioner. It sets up hanging polling for // every Subscription to any Channel. type reconciler struct { @@ -76,6 +80,8 @@ type reconciler struct { // rateLimiter is used to limit the pace at which we nack a message when it could not be dispatched. 
rateLimiter workqueue.RateLimiter + + additionalHandlers []ReconcileHandlers } // Verify the struct implements reconcile.Reconciler @@ -106,7 +112,7 @@ func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, err } // Does this Controller control this Channel? - if !r.shouldReconcile(c) { + if !channel.ShouldReconcile(c) { logging.FromContext(ctx).Info("Not reconciling Channel, it is not controlled by this Controller", zap.Any("ref", c.Spec)) return reconcile.Result{}, nil } @@ -145,15 +151,6 @@ func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, err }, reconcileErr } -// shouldReconcile determines if this Controller should control (and therefore reconcile) a given -// ClusterChannelProvisioner. This Controller only handles gcp-pubsub Channels. -func (r *reconciler) shouldReconcile(c *eventingv1alpha1.Channel) bool { - if c.Spec.Provisioner != nil { - return ccpcontroller.IsControlled(c.Spec.Provisioner) - } - return false -} - // reconcile reconciles this Channel so that the real world matches the intended state. The returned // boolean indicates if this Channel should be immediately requeued for another reconcile loop. The // returned error indicates an error during reconciliation. @@ -176,6 +173,12 @@ func (r *reconciler) reconcile(ctx context.Context, c *eventingv1alpha1.Channel, return true, nil } + for _, h := range r.additionalHandlers { + if err := h(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Name: c.Name, Namespace: c.Namespace}}); err != nil { + logging.FromContext(ctx).Error("Failed reconcile.", zap.Error(err)) + return false, err + } + } // enqueueChannelForReconciliation is a function that when run will force this Channel to be // reconciled again. enqueueChannelForReconciliation := func() { diff --git a/contrib/gcppubsub/pkg/dispatcher/dispatcher/reconcile_test.go b/contrib/gcppubsub/pkg/dispatcher/dispatcher/reconcile_test.go index 7ed8937283e..544b14b2a5a 100644 --- a/contrib/gcppubsub/pkg/dispatcher/dispatcher/reconcile_test.go +++ b/contrib/gcppubsub/pkg/dispatcher/dispatcher/reconcile_test.go @@ -30,11 +30,10 @@ import ( "github.com/knative/eventing/contrib/gcppubsub/pkg/util" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - "github.com/knative/eventing/pkg/provisioners" "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/knative/eventing/contrib/gcppubsub/pkg/util/testcreds" "github.com/knative/eventing/pkg/apis/duck/v1alpha1" @@ -65,10 +64,11 @@ const ( gcpProject = "gcp-project" - pscData = "pscData" - reconcileChan = "reconcileChan" - shouldBeCanceled = "shouldBeCanceled" - shouldNotBeCanceled = "shouldNotBeCanceled" + pscData = "pscData" + reconcileChan = "reconcileChan" + shouldBeCanceled = "shouldBeCanceled" + shouldNotBeCanceled = "shouldNotBeCanceled" + additionalHandlerError = "Error in additional test handler." 
) var ( @@ -99,6 +99,8 @@ var ( dispatcherReconcileFailed: {Reason: dispatcherReconcileFailed, Type: corev1.EventTypeWarning}, dispatcherUpdateStatusFailed: {Reason: dispatcherUpdateStatusFailed, Type: corev1.EventTypeWarning}, } + + hostname = fmt.Sprintf("%s-channel.%s.svc.%s", cName, cNamespace, utils.GetClusterDomainName()) ) func init() { @@ -376,6 +378,22 @@ func TestReconcile(t *testing.T) { events[dispatcherReconciled], events[dispatcherUpdateStatusFailed], }, }, + { + Name: "Fail additional reconcile handler", + InitialState: []runtime.Object{ + makeChannelWithSubscribersAndFinalizer(), + testcreds.MakeSecretWithCreds(), + }, + WantPresent: []runtime.Object{ + makeChannelWithSubscribersAndFinalizer(), + }, + WantEvent: []corev1.Event{ + events[dispatcherReconcileFailed], + }, + OtherTestData: map[string]interface{}{ + additionalHandlerError: additionalHandlerError, + }, + }, // Note - we do not test update status since this dispatcher only adds // finalizers to the channel } @@ -423,12 +441,19 @@ func TestReconcile(t *testing.T) { r.subscriptions[c][s] = cc.wantNotCancel(c, s) } } + if tc.OtherTestData[additionalHandlerError] != nil { + r.additionalHandlers = []ReconcileHandlers{ + func(_ context.Context, _ reconcile.Request) error { + return fmt.Errorf(tc.OtherTestData[additionalHandlerError].(string)) + }, + } + tc.WantErrMsg = additionalHandlerError + } tc.AdditionalVerification = append(tc.AdditionalVerification, cc.verify) tc.IgnoreTimes = true t.Run(tc.Name, tc.Runner(t, r, c, recorder)) } } - func TestReceiveFunc(t *testing.T) { testCases := map[string]struct { ack bool @@ -524,7 +549,7 @@ func makeChannel() *eventingv1alpha1.Channel { }, } c.Status.InitializeConditions() - c.Status.SetAddress(fmt.Sprintf("%s-channel.%s.svc.%s", c.Name, c.Namespace, utils.GetClusterDomainName())) + c.Status.SetAddress(hostname) c.Status.MarkProvisioned() pcs := &util.GcpPubSubChannelStatus{ GCPProject: gcpProject, @@ -642,6 +667,16 @@ func errorGettingChannel() []controllertesting.MockGet { } } +func errorListingChannels() []controllertesting.MockList { + return []controllertesting.MockList{ + func(_ client.Client, _ context.Context, _ *client.ListOptions, obj runtime.Object) (controllertesting.MockHandled, error) { + if _, ok := obj.(*eventingv1alpha1.ChannelList); ok { + return controllertesting.Handled, errors.New(testErrorMessage) + } + return controllertesting.Unhandled, nil + }, + } +} func errorUpdatingChannel() []controllertesting.MockUpdate { return []controllertesting.MockUpdate{ func(_ client.Client, _ context.Context, obj runtime.Object) (controllertesting.MockHandled, error) { diff --git a/contrib/gcppubsub/pkg/dispatcher/receiver/receiver.go b/contrib/gcppubsub/pkg/dispatcher/receiver/receiver.go index 665bb80dda5..c0e4381ab67 100644 --- a/contrib/gcppubsub/pkg/dispatcher/receiver/receiver.go +++ b/contrib/gcppubsub/pkg/dispatcher/receiver/receiver.go @@ -20,8 +20,13 @@ import ( "context" "errors" "fmt" + "sync" + "sync/atomic" + + "github.com/knative/eventing/pkg/channelwatcher" "cloud.google.com/go/pubsub" + "github.com/knative/eventing/contrib/gcppubsub/pkg/controller/channel" "github.com/knative/eventing/contrib/gcppubsub/pkg/dispatcher/receiver/cache" "github.com/knative/eventing/contrib/gcppubsub/pkg/util" eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" @@ -40,6 +45,9 @@ type Receiver struct { pubSubClientCreator util.PubSubClientCreator cache *cache.TTL + + hostToChannelMapMutex sync.Mutex + hostToChannelMap atomic.Value } // New creates a new 
Receiver and its associated MessageReceiver. The caller is responsible for @@ -52,7 +60,9 @@ func New(logger *zap.Logger, client client.Client, pubSubClientCreator util.PubS pubSubClientCreator: pubSubClientCreator, cache: cache.NewTTL(), } + r.setHostToChannelMap(map[string]provisioners.ChannelReference{}) receiver, err := r.newMessageReceiver() + if err != nil { return nil, nil, err } @@ -60,7 +70,18 @@ func New(logger *zap.Logger, client client.Client, pubSubClientCreator util.PubS } func (r *Receiver) newMessageReceiver() (*provisioners.MessageReceiver, error) { - return provisioners.NewMessageReceiver(r.sendEventToTopic, r.logger.Sugar()) + return provisioners.NewMessageReceiver( + r.sendEventToTopic, + r.logger.Sugar(), + provisioners.ResolveChannelFromHostHeader(provisioners.ResolveChannelFromHostFunc(r.getChannelReferenceFromHost))) +} +func (r *Receiver) getChannelReferenceFromHost(host string) (provisioners.ChannelReference, error) { + chMap := r.getHostToChannelMap() + cr, ok := chMap[host] + if !ok { + return cr, fmt.Errorf("Invalid HostName:%s. HostName not found in any of the watched natss channels", host) + } + return cr, nil } // sendEventToTopic sends a message to the Cloud Pub/Sub Topic backing the Channel. @@ -152,3 +173,36 @@ func (r *Receiver) getChannel(ctx context.Context, ref provisioners.ChannelRefer return c, err } +func (r *Receiver) getHostToChannelMap() map[string]provisioners.ChannelReference { + return r.hostToChannelMap.Load().(map[string]provisioners.ChannelReference) +} + +func (r *Receiver) setHostToChannelMap(hcMap map[string]provisioners.ChannelReference) { + r.hostToChannelMap.Store(hcMap) +} + +// UpdateHostToChannelMap will be called from the controller that watches natss channels. +// It will update internal hostToChannelMap which is used to resolve the hostHeader of the +// incoming request to the correct ChannelReference in the receiver function. +func (r *Receiver) UpdateHostToChannelMap(ctx context.Context) error { + logging.FromContext(ctx).Info("UpdateHostToChannelMap: Acquiring mutex lock") + r.hostToChannelMapMutex.Lock() + defer r.hostToChannelMapMutex.Unlock() + logging.FromContext(ctx).Info("UpdateHostToChannelMap: Acquired mutex lock. Updating internal map") + + chanList, err := channelwatcher.ListAllChannels(ctx, r.client, channel.ShouldReconcile) + if err != nil { + logging.FromContext(ctx).Error("UpdateHostToChannelMap: Failed to list all channels.", zap.Error(err)) + return err + } + + hostToChanMap, err := provisioners.NewHostNameToChannelRefMap(chanList) + if err != nil { + logging.FromContext(ctx).Info("UpdateHostToChannelMap: Error occured when creating the new hostToChannel map.", zap.Error(err)) + return err + } + + r.setHostToChannelMap(hostToChanMap) + logging.FromContext(ctx).Info("UpdateHostToChannelMap: Update successful. 
Releasing mutex lock") + return nil +} diff --git a/contrib/gcppubsub/pkg/dispatcher/receiver/receiver_test.go b/contrib/gcppubsub/pkg/dispatcher/receiver/receiver_test.go index d6d69db23b6..10b26dfad4a 100644 --- a/contrib/gcppubsub/pkg/dispatcher/receiver/receiver_test.go +++ b/contrib/gcppubsub/pkg/dispatcher/receiver/receiver_test.go @@ -19,25 +19,30 @@ package receiver import ( "context" "errors" + "fmt" "net/http/httptest" "strings" "testing" - "github.com/knative/eventing/contrib/gcppubsub/pkg/util" + "github.com/google/go-cmp/cmp" + "github.com/knative/eventing/contrib/gcppubsub/pkg/util" eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" + "github.com/knative/eventing/pkg/provisioners" + corev1 "k8s.io/api/core/v1" "k8s.io/client-go/kubernetes/scheme" "github.com/knative/eventing/contrib/gcppubsub/pkg/util/fakepubsub" - "github.com/knative/eventing/pkg/utils" "go.uber.org/zap" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" "github.com/knative/eventing/contrib/gcppubsub/pkg/util/testcreds" + controllertesting "github.com/knative/eventing/pkg/reconciler/testing" ) const ( @@ -54,6 +59,9 @@ const ( "contentType" : "text/xml", "data" : "" }` + ccpName = "gcp-pubsub" + listChannelsFailed = "Failed to list channels" + hostname = "a.b.c.d" ) func init() { @@ -76,14 +84,14 @@ func TestReceiver(t *testing.T) { "can't read status": { initialState: []runtime.Object{ testcreds.MakeSecretWithInvalidCreds(), - makeChannelWithBadStatus(), + makeChannel(withBadStatus()), }, expectedErr: true, }, "blank status": { initialState: []runtime.Object{ testcreds.MakeSecretWithInvalidCreds(), - makeChannelWithBlankStatus(), + makeChannel(withBlankStatus()), }, expectedErr: true, }, @@ -123,7 +131,7 @@ func TestReceiver(t *testing.T) { "Publish succeeds": { initialState: []runtime.Object{ testcreds.MakeSecretWithCreds(), - makeChannel(), + makeChannel(withStatusReady(hostname)), }, }, } @@ -136,13 +144,15 @@ func TestReceiver(t *testing.T) { if err != nil { t.Errorf("Error when creating a New receiver. Error:%s", err) } + mr.setHostToChannelMap(map[string]provisioners.ChannelReference{}) resp := httptest.NewRecorder() req := httptest.NewRequest("POST", "/", strings.NewReader(validMessage)) - req.Host = "test-channel.test-namespace.channels." + utils.GetClusterDomainName() + req.Host = hostname receiver, err := mr.newMessageReceiver() if err != nil { t.Errorf("Error when creating a new message receiver. 
Error:%s", err) } + mr.UpdateHostToChannelMap(context.TODO()) receiver.HandleRequest(resp, req) if tc.expectedErr { if resp.Result().StatusCode >= 200 && resp.Result().StatusCode < 300 { @@ -157,7 +167,112 @@ func TestReceiver(t *testing.T) { } } -func makeChannel() *eventingv1alpha1.Channel { +func TestUpdateHostToChannelMap(t *testing.T) { + testCases := []struct { + name string + initialState []runtime.Object + expectedMap map[string]provisioners.ChannelReference + expectedErrMsg string + mocks controllertesting.Mocks + }{ + { + name: "client.List() channels fails.", + initialState: []runtime.Object{ + makeChannel(withStatusReady(hostname)), + }, + expectedErrMsg: listChannelsFailed, + expectedMap: map[string]provisioners.ChannelReference{}, + mocks: controllertesting.Mocks{ + MockLists: []controllertesting.MockList{ + func(_ client.Client, _ context.Context, _ *client.ListOptions, _ runtime.Object) (controllertesting.MockHandled, error) { + return controllertesting.Handled, fmt.Errorf(listChannelsFailed) + }, + }, + }, + }, + { + name: "Duplciate hostnames.", + initialState: []runtime.Object{ + makeChannel(withName("chan1"), withNamespace("ns1"), withStatusReady("host.name")), + makeChannel(withName("chan2"), withNamespace("ns2"), withStatusReady("host.name")), + }, + expectedErrMsg: "Duplicate hostName found. Each channel must have a unique host header. HostName:host.name, channel:ns2.chan2, channel:ns1.chan1", + expectedMap: map[string]provisioners.ChannelReference{}, + }, + { + name: "Successfully updated hostToChannelMap.", + initialState: []runtime.Object{ + makeChannel(withName("chan1"), withNamespace("ns1"), withStatusReady("host.name1")), + makeChannel(withName("chan2"), withNamespace("ns2"), withStatusReady("host.name2")), + }, + expectedMap: map[string]provisioners.ChannelReference{ + "host.name1": provisioners.ChannelReference{Name: "chan1", Namespace: "ns1"}, + "host.name2": provisioners.ChannelReference{Name: "chan2", Namespace: "ns2"}, + }, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + c := controllertesting.NewMockClient(fake.NewFakeClient(tc.initialState...), tc.mocks) + r, _, err := New(zap.NewNop(), c, fakepubsub.Creator(nil)) + if err != nil { + t.Fatalf("Failed to create receiver.") + } + if err := r.UpdateHostToChannelMap(context.Background()); err != nil { + if diff := cmp.Diff(tc.expectedErrMsg, err.Error()); diff != "" { + t.Fatalf("Unexpected difference (-want +got): %v", diff) + } + } else if tc.expectedErrMsg != "" { + t.Fatalf("Want error:%s, Got nil", tc.expectedErrMsg) + } + + if diff := cmp.Diff(tc.expectedMap, r.getHostToChannelMap()); diff != "" { + t.Fatalf("Unexpected difference (-want +got): %v", diff) + } + }) + } +} + +type option func(*eventingv1alpha1.Channel) + +func withName(name string) option { + return func(c *eventingv1alpha1.Channel) { + c.Name = name + } +} + +func withNamespace(ns string) option { + return func(c *eventingv1alpha1.Channel) { + c.Namespace = ns + } +} + +func withStatusReady(hn string) option { + return func(c *eventingv1alpha1.Channel) { + c.Status.InitializeConditions() + c.Status.InitializeConditions() + c.Status.MarkProvisioned() + c.Status.MarkProvisionerInstalled() + c.Status.SetAddress(hn) + } +} + +func withBlankStatus() option { + return func(c *eventingv1alpha1.Channel) { + c.Status = eventingv1alpha1.ChannelStatus{} + } +} + +func withBadStatus() option { + return func(c *eventingv1alpha1.Channel) { + c.Status.Internal = &runtime.RawExtension{ + // SecretKey must be a string, 
not an integer, so this will fail during json.Unmarshal. + Raw: []byte(`{"secretKey": 123}`), + } + } +} + +func makeChannel(opts ...option) *eventingv1alpha1.Channel { c := &eventingv1alpha1.Channel{ TypeMeta: v1.TypeMeta{ APIVersion: "eventing.knative.dev/v1alpha1", @@ -167,6 +282,11 @@ func makeChannel() *eventingv1alpha1.Channel { Namespace: "test-namespace", Name: "test-channel", }, + Spec: eventingv1alpha1.ChannelSpec{ + Provisioner: &corev1.ObjectReference{ + Name: ccpName, + }, + }, } pcs := &util.GcpPubSubChannelStatus{ GCPProject: "project", @@ -176,20 +296,9 @@ func makeChannel() *eventingv1alpha1.Channel { if err := util.SetInternalStatus(context.Background(), c, pcs); err != nil { panic(err) } - return c -} - -func makeChannelWithBlankStatus() *eventingv1alpha1.Channel { - c := makeChannel() - c.Status = eventingv1alpha1.ChannelStatus{} - return c -} -func makeChannelWithBadStatus() *eventingv1alpha1.Channel { - c := makeChannel() - c.Status.Internal = &runtime.RawExtension{ - // SecretKey must be a string, not an integer, so this will fail during json.Unmarshal. - Raw: []byte(`{"secretKey": 123}`), + for _, opt := range opts { + opt(c) } return c } diff --git a/contrib/natss/pkg/dispatcher/dispatcher/dispatcher.go b/contrib/natss/pkg/dispatcher/dispatcher/dispatcher.go index 4599d73837e..3b86bbb06b5 100644 --- a/contrib/natss/pkg/dispatcher/dispatcher/dispatcher.go +++ b/contrib/natss/pkg/dispatcher/dispatcher/dispatcher.go @@ -311,7 +311,7 @@ func (s *SubscriptionsSupervisor) setHostToChannelMap(hcMap map[string]provision } // UpdateHostToChannelMap will be called from the controller that watches natss channels. -// It will update internal hostToChannelMap which is used to resolve the hostHeader of the +// It will update internal hostToChannelMap which is used to resolve the hostHeader of the // incoming request to the correct ChannelReference in the receiver function. func (s *SubscriptionsSupervisor) UpdateHostToChannelMap(ctx context.Context, chanList []eventingv1alpha1.Channel) error { logging.FromContext(ctx).Info("UpdateHostToChannelMap: Acquiring mutex lock") @@ -319,21 +319,11 @@ func (s *SubscriptionsSupervisor) UpdateHostToChannelMap(ctx context.Context, ch defer s.hostToChannelMapMutex.Unlock() logging.FromContext(ctx).Info("UpdateHostToChannelMap: Acquired mutex lock. Updating internal map") - hostToChanMap := make(map[string]provisioners.ChannelReference, len(chanList)) - for _, c := range chanList { - hostName := c.Status.Address.Hostname - if cr, ok := hostToChanMap[hostName]; ok { - return fmt.Errorf( - "Duplicate hostName found. Each channel must have a unique host header. HostName:%s, channel:%s.%s, channel:%s.%s", - hostName, - c.Namespace, - c.Name, - cr.Namespace, - cr.Name) - } - hostToChanMap[hostName] = provisioners.ChannelReference{Name: c.Name, Namespace: c.Namespace} + hostToChanMap, err := provisioners.NewHostNameToChannelRefMap(chanList) + if err != nil { + logging.FromContext(ctx).Info("UpdateHostToChannelMap: Error occured when creating the hostheader to channelref map.", zap.Error(err)) + return err } - s.setHostToChannelMap(hostToChanMap) logging.FromContext(ctx).Info("UpdateHostToChannelMap: Update successful. 
Releasing mutex lock") return nil diff --git a/pkg/channelwatcher/channel_watcher.go b/pkg/channelwatcher/channel_watcher.go index 90a687b8e67..b325a58c31d 100644 --- a/pkg/channelwatcher/channel_watcher.go +++ b/pkg/channelwatcher/channel_watcher.go @@ -8,7 +8,6 @@ import ( "github.com/knative/eventing/pkg/sidecar/multichannelfanout" "github.com/knative/eventing/pkg/sidecar/swappable" "go.uber.org/zap" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -25,7 +24,7 @@ type reconciler struct { } func (r *reconciler) Reconcile(req reconcile.Request) (reconcile.Result, error) { - ctx := logging.WithLogger(context.TODO(), r.logger.With(zap.Any("request", req))) + ctx := logging.WithLogger(context.Background(), r.logger.With(zap.Any("request", req))) logging.FromContext(ctx).Info("New update for channel.") if err := r.handler(ctx, r.client, req.NamespacedName); err != nil { logging.FromContext(ctx).Error("WatchHandlerFunc returned error", zap.Error(err)) @@ -86,25 +85,14 @@ func UpdateConfigWatchHandler(updateConfig swappable.UpdateConfig, shouldWatch S // ListAllChannels queries client and gets list of all channels for which shouldWatch returns true. func ListAllChannels(ctx context.Context, c client.Client, shouldWatch ShouldWatchFunc) ([]v1alpha1.Channel, error) { channels := make([]v1alpha1.Channel, 0) - for { - cl := &v1alpha1.ChannelList{} - opts := &client.ListOptions{ - // Set Raw because if we need to get more than one page, then we will put the continue token - // into opts.Raw.Continue. - Raw: &metav1.ListOptions{}, - } - if err := c.List(ctx, opts, cl); err != nil { - return nil, err - } - for _, c := range cl.Items { - if c.Status.IsReady() && shouldWatch(&c) { - channels = append(channels, c) - } - } - if cl.Continue != "" { - opts.Raw.Continue = cl.Continue - } else { - return channels, nil + cl := &v1alpha1.ChannelList{} + if err := c.List(ctx, &client.ListOptions{}, cl); err != nil { + return nil, err + } + for _, c := range cl.Items { + if c.Status.IsReady() && shouldWatch(&c) { + channels = append(channels, c) } } + return channels, nil } diff --git a/pkg/provisioners/channel_util.go b/pkg/provisioners/channel_util.go index d61205c6897..903769ffab3 100644 --- a/pkg/provisioners/channel_util.go +++ b/pkg/provisioners/channel_util.go @@ -394,3 +394,22 @@ func channelServiceName(channelName string) string { func channelHostName(channelName, namespace string) string { return fmt.Sprintf("%s.%s.channels.%s", channelName, namespace, utils.GetClusterDomainName()) } + +// NewHostNameToChannelRefMap parses each channel from cList and creates a map[string(Status.Address.HostName)]ChannelReference +func NewHostNameToChannelRefMap(cList []eventingv1alpha1.Channel) (map[string]ChannelReference, error) { + hostToChanMap := make(map[string]ChannelReference, len(cList)) + for _, c := range cList { + hostName := c.Status.Address.Hostname + if cr, ok := hostToChanMap[hostName]; ok { + return nil, fmt.Errorf( + "Duplicate hostName found. Each channel must have a unique host header. 
HostName:%s, channel:%s.%s, channel:%s.%s", + hostName, + c.Namespace, + c.Name, + cr.Namespace, + cr.Name) + } + hostToChanMap[hostName] = ChannelReference{Name: c.Name, Namespace: c.Namespace} + } + return hostToChanMap, nil +} From d1a1bd57d2ba3bb59ee3a048cbcff16e87479cb5 Mon Sep 17 00:00:00 2001 From: Akash Verenkar Date: Wed, 17 Apr 2019 18:03:28 -0700 Subject: [PATCH 24/37] Changes based on PR comments --- .../pkg/dispatcher/receiver/receiver_test.go | 4 +-- contrib/kafka/config/kafka.yaml | 18 ---------- .../pkg/controller/channel/reconcile_test.go | 2 -- contrib/kafka/pkg/dispatcher/dispatcher.go | 22 ++++++------ .../kafka/pkg/dispatcher/dispatcher_test.go | 2 +- pkg/channelwatcher/channel_watcher.go | 36 +++++++------------ pkg/channelwatcher/channel_watcher_test.go | 8 ++--- pkg/provisioners/message_receiver.go | 6 ++-- pkg/sidecar/fanout/fanout_handler_test.go | 6 ++-- pkg/sidecar/multichannelfanout/config.go | 2 +- pkg/sidecar/multichannelfanout/config_test.go | 18 +++------- 11 files changed, 41 insertions(+), 83 deletions(-) diff --git a/contrib/gcppubsub/pkg/dispatcher/receiver/receiver_test.go b/contrib/gcppubsub/pkg/dispatcher/receiver/receiver_test.go index d6d69db23b6..c4789c2c9ed 100644 --- a/contrib/gcppubsub/pkg/dispatcher/receiver/receiver_test.go +++ b/contrib/gcppubsub/pkg/dispatcher/receiver/receiver_test.go @@ -134,14 +134,14 @@ func TestReceiver(t *testing.T) { fake.NewFakeClient(tc.initialState...), fakepubsub.Creator(tc.pubSubData)) if err != nil { - t.Errorf("Error when creating a New receiver. Error:%s", err) + t.Fatalf("Error when creating a New receiver. Error:%s", err) } resp := httptest.NewRecorder() req := httptest.NewRequest("POST", "/", strings.NewReader(validMessage)) req.Host = "test-channel.test-namespace.channels." + utils.GetClusterDomainName() receiver, err := mr.newMessageReceiver() if err != nil { - t.Errorf("Error when creating a new message receiver. Error:%s", err) + t.Fatalf("Error when creating a new message receiver. Error:%s", err) } receiver.HandleRequest(resp, req) if tc.expectedErr { diff --git a/contrib/kafka/config/kafka.yaml b/contrib/kafka/config/kafka.yaml index c58fb14296b..990c7aa20cc 100644 --- a/contrib/kafka/config/kafka.yaml +++ b/contrib/kafka/config/kafka.yaml @@ -67,24 +67,6 @@ rules: - kafka-channel-dispatcher verbs: - update - - apiGroups: - - networking.istio.io - resources: - - virtualservices - verbs: - - get - - list - - watch - - create - - update - - apiGroups: - - "" # Core API Group. 
- resources: - - events - verbs: - - create - - patch - - update --- apiVersion: rbac.authorization.k8s.io/v1 diff --git a/contrib/kafka/pkg/controller/channel/reconcile_test.go b/contrib/kafka/pkg/controller/channel/reconcile_test.go index 33fad32efb3..02836e06a54 100644 --- a/contrib/kafka/pkg/controller/channel/reconcile_test.go +++ b/contrib/kafka/pkg/controller/channel/reconcile_test.go @@ -34,7 +34,6 @@ import ( controllertesting "github.com/knative/eventing/pkg/reconciler/testing" "github.com/knative/eventing/pkg/utils" duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1" - istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3" "github.com/knative/pkg/system" _ "github.com/knative/pkg/system/testing" corev1 "k8s.io/api/core/v1" @@ -72,7 +71,6 @@ var ( func init() { // Add types to scheme eventingv1alpha1.AddToScheme(scheme.Scheme) - istiov1alpha3.AddToScheme(scheme.Scheme) } var mockFetchError = controllertesting.Mocks{ diff --git a/contrib/kafka/pkg/dispatcher/dispatcher.go b/contrib/kafka/pkg/dispatcher/dispatcher.go index 9c84f6f4d4a..bef6061a75f 100644 --- a/contrib/kafka/pkg/dispatcher/dispatcher.go +++ b/contrib/kafka/pkg/dispatcher/dispatcher.go @@ -34,7 +34,7 @@ import ( ) type KafkaDispatcher struct { - // TODO: config doesn't have to be atomic as it is read an updated using updateLock. + // TODO: config doesn't have to be atomic as it is read and updated using updateLock. config atomic.Value hostToChannelMap atomic.Value updateLock sync.Mutex @@ -43,10 +43,8 @@ type KafkaDispatcher struct { dispatcher *provisioners.MessageDispatcher kafkaAsyncProducer sarama.AsyncProducer - // TODO: kafkaConsumer map should probably be atomic as it is updated and read on separate go routines with no syncchronization. - // Verify if this is an issue and fix accordignly - kafkaConsumers map[provisioners.ChannelReference]map[subscription]KafkaConsumer - kafkaCluster KafkaCluster + kafkaConsumers map[provisioners.ChannelReference]map[subscription]KafkaConsumer + kafkaCluster KafkaCluster logger *zap.Logger } @@ -91,7 +89,7 @@ type subscription struct { // ConfigDiff diffs the new config with the existing config. If there are no differences, then the // empty string is returned. If there are differences, then a non-empty string is returned // describing the differences. -func (d *KafkaDispatcher) ConfigDiff(updated *multichannelfanout.Config) string { +func (d *KafkaDispatcher) configDiff(updated *multichannelfanout.Config) string { return cmp.Diff(d.getConfig(), updated) } @@ -103,7 +101,7 @@ func (d *KafkaDispatcher) UpdateConfig(config *multichannelfanout.Config) error d.updateLock.Lock() defer d.updateLock.Unlock() - if diff := d.ConfigDiff(config); diff != "" { + if diff := d.configDiff(config); diff != "" { d.logger.Info("Updating config (-old +new)", zap.String("diff", diff)) newSubs := make(map[subscription]bool) @@ -148,11 +146,11 @@ func (d *KafkaDispatcher) UpdateConfig(config *multichannelfanout.Config) error } func createHostToChannelMap(config *multichannelfanout.Config) (map[string]provisioners.ChannelReference, error) { - hcMap := make(map[string]provisioners.ChannelReference) + hcMap := make(map[string]provisioners.ChannelReference, len(config.ChannelConfigs)) for _, cConfig := range config.ChannelConfigs { if cr, ok := hcMap[cConfig.HostName]; ok { return nil, fmt.Errorf( - "Duplicate hostName found. Each channel must have a unique host header. HostName:%s, channel:%s.%s, channel:%s.%s", + "duplicate hostName found. Each channel must have a unique host header. 
HostName:%s, channel:%s.%s, channel:%s.%s", cConfig.HostName, cConfig.Namespace, cConfig.Name, @@ -190,6 +188,8 @@ func (d *KafkaDispatcher) Start(stopCh <-chan struct{}) error { return d.receiver.Start(stopCh) } +// subscribe reads kafkaConsumers which gets updated in UpdateConfig in a separate go-routine. +// subscribe must be called under updateLock. func (d *KafkaDispatcher) subscribe(channelRef provisioners.ChannelReference, sub subscription) error { d.logger.Info("Subscribing", zap.Any("channelRef", channelRef), zap.Any("subscription", sub)) @@ -262,6 +262,8 @@ func (d *KafkaDispatcher) dispatch(channelRef provisioners.ChannelReference, sub return err } +// unsubscribe reads kafkaConsumers which gets updated in UpdateConfig in a separate go-routine. +// unsubscribe must be called under updateLock. func (d *KafkaDispatcher) unsubscribe(channel provisioners.ChannelReference, sub subscription) error { d.logger.Info("Unsubscribing from channel", zap.Any("channel", channel), zap.Any("subscription", sub)) if consumer, ok := d.kafkaConsumers[channel][sub]; ok { @@ -336,7 +338,7 @@ func (d *KafkaDispatcher) getChannelReferenceFromHost(host string) (provisioners chMap := d.getHostToChannelMap() cr, ok := chMap[host] if !ok { - return cr, fmt.Errorf("Invalid HostName:%s. HostName not found in ConfigMap for any Channel", host) + return cr, fmt.Errorf("invalid Hostname:%s. Hostname not found in ConfigMap for any Channel", host) } return cr, nil } diff --git a/contrib/kafka/pkg/dispatcher/dispatcher_test.go b/contrib/kafka/pkg/dispatcher/dispatcher_test.go index 9e697e83262..8f0e4f39169 100644 --- a/contrib/kafka/pkg/dispatcher/dispatcher_test.go +++ b/contrib/kafka/pkg/dispatcher/dispatcher_test.go @@ -406,7 +406,7 @@ func TestDispatcher_UpdateConfig(t *testing.T) { }, }, }, - createErr: "Duplicate hostName found. Each channel must have a unique host header. HostName:a.b.c.d, channel:default.test-channel-2, channel:default.test-channel-1", + createErr: "duplicate hostName found. Each channel must have a unique host header. HostName:a.b.c.d, channel:default.test-channel-2, channel:default.test-channel-1", oldHostToChanMap: map[string]provisioners.ChannelReference{}, }, } diff --git a/pkg/channelwatcher/channel_watcher.go b/pkg/channelwatcher/channel_watcher.go index ff8a7852e65..a2822b8df45 100644 --- a/pkg/channelwatcher/channel_watcher.go +++ b/pkg/channelwatcher/channel_watcher.go @@ -8,7 +8,6 @@ import ( "github.com/knative/eventing/pkg/sidecar/multichannelfanout" "github.com/knative/eventing/pkg/sidecar/swappable" "go.uber.org/zap" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -67,10 +66,10 @@ type WatchHandlerFunc func(context.Context, client.Client, types.NamespacedName) type ShouldWatchFunc func(ch *v1alpha1.Channel) bool // UpdateConfigWatchHandler is a special handler that -// 1. Lists the channels for which shouldWatch returns true -// 2. Creates a multi-channel-fanout-config -// 3. Calls the updateConfig func with the new multi-channel-fanout-config -// This is used by dispatchers or receivers to update their configs by watching channels +// 1. Lists the channels for which shouldWatch returns true. +// 2. Creates a multi-channel-fanout-config. +// 3. Calls the updateConfig func with the new multi-channel-fanout-config. +// This is used by dispatchers or receivers to update their configs by watching channels. 
func UpdateConfigWatchHandler(updateConfig swappable.UpdateConfig, shouldWatch ShouldWatchFunc) WatchHandlerFunc { return func(ctx context.Context, c client.Client, _ types.NamespacedName) error { channels, err := listAllChannels(ctx, c, shouldWatch) @@ -86,25 +85,14 @@ func UpdateConfigWatchHandler(updateConfig swappable.UpdateConfig, shouldWatch S // listAllChannels queries client and gets list of all channels for which shouldWatch returns true. func listAllChannels(ctx context.Context, c client.Client, shouldWatch ShouldWatchFunc) ([]v1alpha1.Channel, error) { channels := make([]v1alpha1.Channel, 0) - for { - cl := &v1alpha1.ChannelList{} - opts := &client.ListOptions{ - // Set Raw because if we need to get more than one page, then we will put the continue token - // into opts.Raw.Continue. - Raw: &metav1.ListOptions{}, - } - if err := c.List(ctx, opts, cl); err != nil { - return nil, err - } - for _, c := range cl.Items { - if c.Status.IsReady() && shouldWatch(&c) { - channels = append(channels, c) - } - } - if cl.Continue != "" { - opts.Raw.Continue = cl.Continue - } else { - return channels, nil + cl := &v1alpha1.ChannelList{} + if err := c.List(ctx, &client.ListOptions{}, cl); err != nil { + return nil, err + } + for _, c := range cl.Items { + if c.Status.IsReady() && shouldWatch(&c) { + channels = append(channels, c) } } + return channels, nil } diff --git a/pkg/channelwatcher/channel_watcher_test.go b/pkg/channelwatcher/channel_watcher_test.go index 56f9f772873..0ac1efe43bc 100644 --- a/pkg/channelwatcher/channel_watcher_test.go +++ b/pkg/channelwatcher/channel_watcher_test.go @@ -48,9 +48,9 @@ func TestUpdateConfigWatchHandler(t *testing.T) { { name: "Successfully update config", channels: []runtime.Object{ - makechannel("chan-1", "ns-1", "e.f.g.h", makeSubscribable(makeSubscriber("sub1"), makeSubscriber("sub2"))), - makechannel("chan-2", "ns-2", "i.j.k.l", makeSubscribable(makeSubscriber("sub3"), makeSubscriber("sub4"))), - makechannel("chan-3", "donotwatch", "i.j.k.l", makeSubscribable(makeSubscriber("sub3"), makeSubscriber("sub4"))), + makeChannel("chan-1", "ns-1", "e.f.g.h", makeSubscribable(makeSubscriber("sub1"), makeSubscriber("sub2"))), + makeChannel("chan-2", "ns-2", "i.j.k.l", makeSubscribable(makeSubscriber("sub3"), makeSubscriber("sub4"))), + makeChannel("chan-3", "donotwatch", "i.j.k.l", makeSubscribable(makeSubscriber("sub3"), makeSubscriber("sub4"))), }, expectedConfig: &multichannelfanout.Config{ ChannelConfigs: []multichannelfanout.ChannelConfig{ @@ -147,7 +147,7 @@ func getClientMocks(listError error) controllertesting.Mocks { return controllertesting.Mocks{} } -func makechannel(name string, namespace string, hostname string, subscribable *eventingduck.Subscribable) *v1alpha1.Channel { +func makeChannel(name string, namespace string, hostname string, subscribable *eventingduck.Subscribable) *v1alpha1.Channel { c := v1alpha1.Channel{ Spec: v1alpha1.ChannelSpec{ Subscribable: subscribable, diff --git a/pkg/provisioners/message_receiver.go b/pkg/provisioners/message_receiver.go index 4c9bac7f93a..175c796762b 100644 --- a/pkg/provisioners/message_receiver.go +++ b/pkg/provisioners/message_receiver.go @@ -41,15 +41,15 @@ type MessageReceiver struct { hostToChannelFunc ResolveChannelFromHostFunc } -// ReceiverOptions provides functional options to MessageReceiver function +// ReceiverOptions provides functional options to MessageReceiver function. 
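// A hedged usage sketch of the host-header option introduced in this patch (receiverFunc,
// logger, and hostToChannel are illustrative caller-side names):
//
//	resolve := func(host string) (provisioners.ChannelReference, error) {
//		cr, ok := hostToChannel[host] // hostToChannel: a map[string]provisioners.ChannelReference built by the caller
//		if !ok {
//			return cr, fmt.Errorf("unknown host %q", host)
//		}
//		return cr, nil
//	}
//	mr, err := provisioners.NewMessageReceiver(receiverFunc, logger.Sugar(), provisioners.ResolveChannelFromHostHeader(resolve))
//
// Without the option, NewMessageReceiver keeps the default ParseChannel behaviour.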
type ReceiverOptions func(*MessageReceiver) error // ResolveChannelFromHostFunc function enables MessageReceiver to get the Channel Reference from incoming request HostHeader -// before calling receiverFunc +// before calling receiverFunc. type ResolveChannelFromHostFunc func(string) (ChannelReference, error) // ResolveChannelFromHostHeader is a ReceiverOption for NewMessageReceiver which enables the caller to overwrite the -// default behaviour defined by ParseChannel function +// default behaviour defined by ParseChannel function. func ResolveChannelFromHostHeader(hostToChannelFunc ResolveChannelFromHostFunc) ReceiverOptions { return func(r *MessageReceiver) error { r.hostToChannelFunc = hostToChannelFunc diff --git a/pkg/sidecar/fanout/fanout_handler_test.go b/pkg/sidecar/fanout/fanout_handler_test.go index 1163144c8e4..95e4752b1c6 100644 --- a/pkg/sidecar/fanout/fanout_handler_test.go +++ b/pkg/sidecar/fanout/fanout_handler_test.go @@ -227,8 +227,7 @@ func TestFanoutHandler_ServeHTTP(t *testing.T) { h, err := NewHandler(zap.NewNop(), Config{Subscriptions: subs}) if err != nil { - t.Errorf("NewHandler failed. Error:%s", err) - t.FailNow() + t.Fatalf("NewHandler failed. Error:%s", err) } if tc.asyncHandler { h.config.AsyncHandler = true @@ -236,8 +235,7 @@ func TestFanoutHandler_ServeHTTP(t *testing.T) { if tc.receiverFunc != nil { receiver, err := provisioners.NewMessageReceiver(tc.receiverFunc, zap.NewNop().Sugar()) if err != nil { - t.Errorf("NewMessageReceiver failed. Error:%s", err) - t.FailNow() + t.Fatalf("NewMessageReceiver failed. Error:%s", err) } h.receiver = receiver } diff --git a/pkg/sidecar/multichannelfanout/config.go b/pkg/sidecar/multichannelfanout/config.go index 1c3ca420def..77f97a2e807 100644 --- a/pkg/sidecar/multichannelfanout/config.go +++ b/pkg/sidecar/multichannelfanout/config.go @@ -35,7 +35,7 @@ type ChannelConfig struct { FanoutConfig fanout.Config `json:"fanoutConfig"` } -// NewConfigFromChannels creates a new Config from the list of channels +// NewConfigFromChannels creates a new Config from the list of channels. func NewConfigFromChannels(channels []v1alpha1.Channel) *Config { cc := make([]ChannelConfig, 0) for _, c := range channels { diff --git a/pkg/sidecar/multichannelfanout/config_test.go b/pkg/sidecar/multichannelfanout/config_test.go index a6d3d5ed782..0d7afe8d09f 100644 --- a/pkg/sidecar/multichannelfanout/config_test.go +++ b/pkg/sidecar/multichannelfanout/config_test.go @@ -14,16 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package multichannelfanout provides an http.Handler that takes in one request to a Knative -// Channel and fans it out to N other requests. Logically, it represents multiple Knative Channels. -// It is made up of a map, map[channel]fanout.Handler and each incoming request is inspected to -// determine which Channel it is on. This Handler delegates the HTTP handling to the fanout.Handler -// corresponding to the incoming request's Channel. -// It is often used in conjunction with a swappable.Handler. The swappable.Handler delegates all its -// requests to the multichannelfanout.Handler. When a new configuration is available, a new -// multichannelfanout.Handler is created and swapped in for all subsequent requests. The old -// multichannelfanout.Handler is discarded. 
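// The package comment deleted above still describes the swap pattern the dispatchers rely
// on: build a fresh multichannelfanout config and hand it to a swappable handler. A hedged
// sketch, assuming the swappable handler exposes an UpdateConfig method compatible with the
// swappable.UpdateConfig func type used elsewhere in this patch:
//
//	conf := multichannelfanout.NewConfigFromChannels(channels)
//	if err := swappableHandler.UpdateConfig(conf); err != nil {
//		// Keep serving with the previous config if the swap fails.
//		return err
//	}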
- package multichannelfanout import ( @@ -53,7 +43,7 @@ func TestNewConfigFromChannels(t *testing.T) { }, { name: "one channel with no subscribers", channels: []v1alpha1.Channel{ - makechannel("chan-1", "ns-1", "a.b.c.d", nil), + makeChannel("chan-1", "ns-1", "a.b.c.d", nil), }, expected: &Config{ ChannelConfigs: []ChannelConfig{ @@ -67,8 +57,8 @@ func TestNewConfigFromChannels(t *testing.T) { }, { name: "multiple channels with subscribers", channels: []v1alpha1.Channel{ - makechannel("chan-1", "ns-1", "e.f.g.h", makeSubscribable(makeSubscriber("sub1"), makeSubscriber("sub2"))), - makechannel("chan-2", "ns-2", "i.j.k.l", makeSubscribable(makeSubscriber("sub3"), makeSubscriber("sub4"))), + makeChannel("chan-1", "ns-1", "e.f.g.h", makeSubscribable(makeSubscriber("sub1"), makeSubscriber("sub2"))), + makeChannel("chan-2", "ns-2", "i.j.k.l", makeSubscribable(makeSubscriber("sub3"), makeSubscriber("sub4"))), }, expected: &Config{ ChannelConfigs: []ChannelConfig{ @@ -108,7 +98,7 @@ func TestNewConfigFromChannels(t *testing.T) { } } -func makechannel(name string, namespace string, hostname string, subscribable *eventingduck.Subscribable) v1alpha1.Channel { +func makeChannel(name string, namespace string, hostname string, subscribable *eventingduck.Subscribable) v1alpha1.Channel { c := v1alpha1.Channel{ Spec: v1alpha1.ChannelSpec{ Subscribable: subscribable, From d71fecf9fa2a53eda24c61beaff532198f69216d Mon Sep 17 00:00:00 2001 From: akashrv Date: Thu, 18 Apr 2019 14:56:35 -0700 Subject: [PATCH 25/37] Added back permission that was removed by mistake --- contrib/kafka/config/kafka.yaml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/contrib/kafka/config/kafka.yaml b/contrib/kafka/config/kafka.yaml index 990c7aa20cc..31506c804de 100644 --- a/contrib/kafka/config/kafka.yaml +++ b/contrib/kafka/config/kafka.yaml @@ -67,6 +67,14 @@ rules: - kafka-channel-dispatcher verbs: - update + - apiGroups: + - "" # Core API Group. + resources: + - events + verbs: + - create + - patch + - update --- apiVersion: rbac.authorization.k8s.io/v1 From e3e175c743cbdb3a19abbbcc7afbf6934d000b2b Mon Sep 17 00:00:00 2001 From: akashrv Date: Thu, 18 Apr 2019 16:38:34 -0700 Subject: [PATCH 26/37] WIP --- .../gcppubsub/pkg/controller/channel/reconcile_test.go | 2 -- contrib/kafka/cmd/controller/main.go | 2 -- contrib/kafka/config/kafka.yaml | 10 ---------- contrib/kafka/main.go | 4 ++-- 4 files changed, 2 insertions(+), 16 deletions(-) diff --git a/contrib/gcppubsub/pkg/controller/channel/reconcile_test.go b/contrib/gcppubsub/pkg/controller/channel/reconcile_test.go index c48b337f536..d4bdebae22a 100644 --- a/contrib/gcppubsub/pkg/controller/channel/reconcile_test.go +++ b/contrib/gcppubsub/pkg/controller/channel/reconcile_test.go @@ -36,7 +36,6 @@ import ( "github.com/knative/eventing/pkg/reconciler/names" controllertesting "github.com/knative/eventing/pkg/reconciler/testing" "github.com/knative/eventing/pkg/utils" - istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3" "github.com/knative/pkg/system" _ "github.com/knative/pkg/system/testing" "go.uber.org/zap" @@ -110,7 +109,6 @@ func init() { // Add types to scheme. 
eventingv1alpha1.AddToScheme(scheme.Scheme) corev1.AddToScheme(scheme.Scheme) - istiov1alpha3.AddToScheme(scheme.Scheme) } func TestInjectClient(t *testing.T) { diff --git a/contrib/kafka/cmd/controller/main.go b/contrib/kafka/cmd/controller/main.go index 375361f4af3..be99d7231ef 100644 --- a/contrib/kafka/cmd/controller/main.go +++ b/contrib/kafka/cmd/controller/main.go @@ -8,7 +8,6 @@ import ( "github.com/knative/eventing/contrib/kafka/pkg/controller/channel" eventingv1alpha "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" "github.com/knative/eventing/pkg/provisioners" - istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3" "go.uber.org/zap" "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/controller-runtime/pkg/client/config" @@ -47,7 +46,6 @@ func _main() int { // Add custom types to this array to get them into the manager's scheme. schemeFuncs := []SchemeFunc{ eventingv1alpha.AddToScheme, - istiov1alpha3.AddToScheme, } for _, schemeFunc := range schemeFuncs { schemeFunc(mgr.GetScheme()) diff --git a/contrib/kafka/config/kafka.yaml b/contrib/kafka/config/kafka.yaml index c58fb14296b..ef596f113fd 100644 --- a/contrib/kafka/config/kafka.yaml +++ b/contrib/kafka/config/kafka.yaml @@ -67,16 +67,6 @@ rules: - kafka-channel-dispatcher verbs: - update - - apiGroups: - - networking.istio.io - resources: - - virtualservices - verbs: - - get - - list - - watch - - create - - update - apiGroups: - "" # Core API Group. resources: diff --git a/contrib/kafka/main.go b/contrib/kafka/main.go index ed98481c20b..37af93a15c4 100644 --- a/contrib/kafka/main.go +++ b/contrib/kafka/main.go @@ -8,7 +8,7 @@ import ( "github.com/knative/eventing/contrib/kafka/pkg/controller/channel" eventingv1alpha "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" "github.com/knative/eventing/pkg/provisioners" - istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3" + "github.com/knative/pkg/apis/istio/v1alpha3" "go.uber.org/zap" "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/controller-runtime/pkg/client/config" @@ -47,7 +47,7 @@ func main() { // Add custom types to this array to get them into the manager's scheme. schemeFuncs := []SchemeFunc{ eventingv1alpha.AddToScheme, - istiov1alpha3.AddToScheme, + v1alpha3.AddToScheme, } for _, schemeFunc := range schemeFuncs { schemeFunc(mgr.GetScheme()) From 6f5d4f8fc51c789f0d1c2bad0e06e064a4035cc3 Mon Sep 17 00:00:00 2001 From: akashrv Date: Thu, 18 Apr 2019 16:45:59 -0700 Subject: [PATCH 27/37] Remove istio references --- contrib/kafka/main.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/contrib/kafka/main.go b/contrib/kafka/main.go index ed98481c20b..62df224cc98 100644 --- a/contrib/kafka/main.go +++ b/contrib/kafka/main.go @@ -8,7 +8,6 @@ import ( "github.com/knative/eventing/contrib/kafka/pkg/controller/channel" eventingv1alpha "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" "github.com/knative/eventing/pkg/provisioners" - istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3" "go.uber.org/zap" "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/controller-runtime/pkg/client/config" @@ -47,7 +46,6 @@ func main() { // Add custom types to this array to get them into the manager's scheme. 
schemeFuncs := []SchemeFunc{ eventingv1alpha.AddToScheme, - istiov1alpha3.AddToScheme, } for _, schemeFunc := range schemeFuncs { schemeFunc(mgr.GetScheme()) From 0f66d6852d9992b70ddc041e16e35527f257cef8 Mon Sep 17 00:00:00 2001 From: akashrv Date: Thu, 18 Apr 2019 17:03:47 -0700 Subject: [PATCH 28/37] WIP --- contrib/kafka/cmd/controller/main.go | 3 +- contrib/kafka/config/kafka.yaml | 2 +- .../pkg/controller/channel/controller.go | 11 ----- .../natss/pkg/controller/channel/reconcile.go | 1 - .../pkg/controller/channel/reconcile_test.go | 47 ------------------- contrib/natss/pkg/controller/main.go | 2 - 6 files changed, 2 insertions(+), 64 deletions(-) diff --git a/contrib/kafka/cmd/controller/main.go b/contrib/kafka/cmd/controller/main.go index 375361f4af3..a6757be2709 100644 --- a/contrib/kafka/cmd/controller/main.go +++ b/contrib/kafka/cmd/controller/main.go @@ -8,7 +8,6 @@ import ( "github.com/knative/eventing/contrib/kafka/pkg/controller/channel" eventingv1alpha "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" "github.com/knative/eventing/pkg/provisioners" - istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3" "go.uber.org/zap" "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/controller-runtime/pkg/client/config" @@ -47,7 +46,7 @@ func _main() int { // Add custom types to this array to get them into the manager's scheme. schemeFuncs := []SchemeFunc{ eventingv1alpha.AddToScheme, - istiov1alpha3.AddToScheme, + v1alpha3.AddToScheme, } for _, schemeFunc := range schemeFuncs { schemeFunc(mgr.GetScheme()) diff --git a/contrib/kafka/config/kafka.yaml b/contrib/kafka/config/kafka.yaml index 31506c804de..d5ddb1e96e8 100644 --- a/contrib/kafka/config/kafka.yaml +++ b/contrib/kafka/config/kafka.yaml @@ -98,7 +98,7 @@ metadata: namespace: knative-eventing data: # Broker URL's for the provisioner. Replace this with the URL's for your kafka cluster. - bootstrap_servers: kafkabroker.kafka:9092 + bootstrap_servers: my-cluster-kafka-bootstrap.kafka:9092 # Consumer mode to dispatch events from different partitions in parallel. # By default(multiplex), partitions are multiplexed with a single go channel. diff --git a/contrib/natss/pkg/controller/channel/controller.go b/contrib/natss/pkg/controller/channel/controller.go index 9968eedf9e2..5427bb180c1 100644 --- a/contrib/natss/pkg/controller/channel/controller.go +++ b/contrib/natss/pkg/controller/channel/controller.go @@ -24,7 +24,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/source" eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" - istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3" corev1 "k8s.io/api/core/v1" ) @@ -65,15 +64,5 @@ func ProvideController(mgr manager.Manager, logger *zap.Logger) (controller.Cont logger.Error("Unable to watch K8s Services.", zap.Error(err)) return nil, err } - - // Watch the VirtualServices that are owned by Channels. 
- err = c.Watch(&source.Kind{ - Type: &istiov1alpha3.VirtualService{}, - }, &handler.EnqueueRequestForOwner{OwnerType: &eventingv1alpha1.Channel{}, IsController: true}) - if err != nil { - logger.Error("Unable to watch VirtualServices.", zap.Error(err)) - return nil, err - } - return c, nil } diff --git a/contrib/natss/pkg/controller/channel/reconcile.go b/contrib/natss/pkg/controller/channel/reconcile.go index 2429d5eff62..7f8b393c886 100644 --- a/contrib/natss/pkg/controller/channel/reconcile.go +++ b/contrib/natss/pkg/controller/channel/reconcile.go @@ -115,7 +115,6 @@ func (r *reconciler) reconcile(ctx context.Context, c *eventingv1alpha1.Channel) // We are syncing two things: // 1. The K8s Service to talk to this Channel. - // 2. The Istio VirtualService to talk to this Channel. if c.DeletionTimestamp != nil { // K8s garbage collection will delete the K8s service and VirtualService for this channel. diff --git a/contrib/natss/pkg/controller/channel/reconcile_test.go b/contrib/natss/pkg/controller/channel/reconcile_test.go index e5b15005932..7b02bbfbcf9 100644 --- a/contrib/natss/pkg/controller/channel/reconcile_test.go +++ b/contrib/natss/pkg/controller/channel/reconcile_test.go @@ -26,7 +26,6 @@ import ( controllertesting "github.com/knative/eventing/pkg/reconciler/testing" "github.com/knative/eventing/pkg/utils" duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1" - istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3" "github.com/knative/pkg/system" _ "github.com/knative/pkg/system/testing" corev1 "k8s.io/api/core/v1" @@ -57,7 +56,6 @@ var ( func init() { // Add types to scheme eventingv1alpha1.AddToScheme(scheme.Scheme) - istiov1alpha3.AddToScheme(scheme.Scheme) } var testCases = []controllertesting.TestCase{ @@ -217,51 +215,6 @@ func makeNewClusterChannelProvisioner(name string, isReady bool) *eventingv1alph clusterChannelProvisioner.ObjectMeta.SelfLink = "" return clusterChannelProvisioner } -func makeVirtualService() *istiov1alpha3.VirtualService { - return &istiov1alpha3.VirtualService{ - TypeMeta: metav1.TypeMeta{ - APIVersion: istiov1alpha3.SchemeGroupVersion.String(), - Kind: "VirtualService", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("%s-channel", testNS), - Namespace: testNS, - Labels: map[string]string{ - "channel": channelName, - "provisioner": clusterChannelProvisionerName, - }, - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: eventingv1alpha1.SchemeGroupVersion.String(), - Kind: "Channel", - Name: channelName, - UID: testUID, - Controller: &truePointer, - BlockOwnerDeletion: &truePointer, - }, - }, - }, - Spec: istiov1alpha3.VirtualServiceSpec{ - Hosts: []string{ - serviceAddress, - fmt.Sprintf("%s.%s.channels.%s", channelName, testNS, utils.GetClusterDomainName()), - }, - HTTP: []istiov1alpha3.HTTPRoute{{ - Rewrite: &istiov1alpha3.HTTPRewrite{ - Authority: fmt.Sprintf("%s.%s.channels.%s", channelName, testNS, utils.GetClusterDomainName()), - }, - Route: []istiov1alpha3.HTTPRouteDestination{{ - Destination: istiov1alpha3.Destination{ - Host: "kafka-provisioner.knative-eventing.svc." 
+ utils.GetClusterDomainName(), - Port: istiov1alpha3.PortSelector{ - Number: provisioners.PortNumber, - }, - }}, - }}, - }, - }, - } -} func om(namespace, name string) metav1.ObjectMeta { return metav1.ObjectMeta{ diff --git a/contrib/natss/pkg/controller/main.go b/contrib/natss/pkg/controller/main.go index 531e8901f7c..26cb9f6047a 100644 --- a/contrib/natss/pkg/controller/main.go +++ b/contrib/natss/pkg/controller/main.go @@ -24,7 +24,6 @@ import ( "github.com/knative/eventing/contrib/natss/pkg/util" eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" "github.com/knative/eventing/pkg/provisioners" - istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3" "github.com/knative/pkg/signals" "go.uber.org/zap" "sigs.k8s.io/controller-runtime/pkg/client/config" @@ -48,7 +47,6 @@ func main() { // Add custom types to this array to get them into the manager's scheme. eventingv1alpha1.AddToScheme(mgr.GetScheme()) - istiov1alpha3.AddToScheme(mgr.GetScheme()) _, err = clusterchannelprovisioner.ProvideController(mgr, util.GetDefaultNatssURL(), util.GetDefaultClusterID(), logger.Desugar()) if err != nil { From 9f53403a0ac6c1b94de585a9ed46649bc6dc0135 Mon Sep 17 00:00:00 2001 From: akashrv Date: Thu, 18 Apr 2019 17:05:52 -0700 Subject: [PATCH 29/37] Removed one more reference of istio --- contrib/kafka/cmd/controller/main.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/contrib/kafka/cmd/controller/main.go b/contrib/kafka/cmd/controller/main.go index 375361f4af3..be99d7231ef 100644 --- a/contrib/kafka/cmd/controller/main.go +++ b/contrib/kafka/cmd/controller/main.go @@ -8,7 +8,6 @@ import ( "github.com/knative/eventing/contrib/kafka/pkg/controller/channel" eventingv1alpha "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" "github.com/knative/eventing/pkg/provisioners" - istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3" "go.uber.org/zap" "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/controller-runtime/pkg/client/config" @@ -47,7 +46,6 @@ func _main() int { // Add custom types to this array to get them into the manager's scheme. schemeFuncs := []SchemeFunc{ eventingv1alpha.AddToScheme, - istiov1alpha3.AddToScheme, } for _, schemeFunc := range schemeFuncs { schemeFunc(mgr.GetScheme()) From 98f6ff9d874428836d52671de94a3aa02607269f Mon Sep 17 00:00:00 2001 From: akashrv Date: Fri, 19 Apr 2019 11:19:11 -0700 Subject: [PATCH 30/37] Revert kafka.yaml local change --- contrib/kafka/config/kafka.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/kafka/config/kafka.yaml b/contrib/kafka/config/kafka.yaml index d5ddb1e96e8..31506c804de 100644 --- a/contrib/kafka/config/kafka.yaml +++ b/contrib/kafka/config/kafka.yaml @@ -98,7 +98,7 @@ metadata: namespace: knative-eventing data: # Broker URL's for the provisioner. Replace this with the URL's for your kafka cluster. - bootstrap_servers: my-cluster-kafka-bootstrap.kafka:9092 + bootstrap_servers: kafkabroker.kafka:9092 # Consumer mode to dispatch events from different partitions in parallel. # By default(multiplex), partitions are multiplexed with a single go channel. 
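The consumer-mode comment in kafka.yaml above describes the default "multiplex" behaviour: every Kafka partition is funnelled onto a single Go channel, while the non-default mode presumably gives each partition its own dispatch loop so events from different partitions are delivered in parallel. Below is a minimal, self-contained sketch of that multiplexing idea only; the names (multiplex, string-valued partition channels) are illustrative assumptions and not the provisioner's actual types or code.

package main

import (
	"fmt"
	"sync"
)

// multiplex funnels messages from every partition channel onto a single
// Go channel, mirroring the default "multiplex" consumer mode described
// in the kafka.yaml comment. The types here are stand-ins for illustration.
func multiplex(partitions []<-chan string) <-chan string {
	out := make(chan string)
	var wg sync.WaitGroup
	wg.Add(len(partitions))
	for _, p := range partitions {
		go func(p <-chan string) {
			defer wg.Done()
			for msg := range p {
				out <- msg // all partitions share one dispatch channel
			}
		}(p)
	}
	go func() {
		wg.Wait()
		close(out)
	}()
	return out
}

func main() {
	p1 := make(chan string, 1)
	p2 := make(chan string, 1)
	p1 <- "event from partition 0"
	p2 <- "event from partition 1"
	close(p1)
	close(p2)
	// A single consumer loop dispatches messages from both partitions.
	for msg := range multiplex([]<-chan string{p1, p2}) {
		fmt.Println(msg)
	}
}

A partition-parallel mode would instead run one such consumer loop per partition rather than sharing out, which is what allows events from different partitions to be dispatched concurrently.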
From 07995ddba5ee1ec3e890acc6b615fecd34fcc84f Mon Sep 17 00:00:00 2001 From: akashrv Date: Fri, 19 Apr 2019 12:06:24 -0700 Subject: [PATCH 31/37] WIP --- cmd/controller/main.go | 6 +++--- contrib/gcppubsub/config/gcppubsub.yaml | 19 ------------------- pkg/reconciler/v1alpha1/trigger/trigger.go | 3 +-- .../v1alpha1/trigger/trigger_test.go | 2 -- 4 files changed, 4 insertions(+), 26 deletions(-) diff --git a/cmd/controller/main.go b/cmd/controller/main.go index 2eea7fdd832..e369447cafa 100644 --- a/cmd/controller/main.go +++ b/cmd/controller/main.go @@ -19,16 +19,18 @@ package main import ( "context" "flag" - "github.com/knative/eventing/pkg/reconciler/subscription" "log" "net/http" "os" "time" + "github.com/knative/eventing/pkg/reconciler/subscription" + "k8s.io/apimachinery/pkg/runtime" kubeinformers "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" + // Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters). _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" @@ -41,7 +43,6 @@ import ( "github.com/knative/eventing/pkg/reconciler/v1alpha1/channel" "github.com/knative/eventing/pkg/reconciler/v1alpha1/namespace" "github.com/knative/eventing/pkg/reconciler/v1alpha1/trigger" - istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3" "github.com/knative/pkg/configmap" kncontroller "github.com/knative/pkg/controller" "github.com/knative/pkg/logging/logkey" @@ -169,7 +170,6 @@ func startControllerRuntime(stopCh <-chan struct{}, cfg *rest.Config, logger *za // Add custom types to this array to get them into the manager's scheme. schemeFuncs := []SchemeFunc{ - istiov1alpha3.AddToScheme, eventingv1alpha1.AddToScheme, } for _, schemeFunc := range schemeFuncs { diff --git a/contrib/gcppubsub/config/gcppubsub.yaml b/contrib/gcppubsub/config/gcppubsub.yaml index ef22343e158..a8de1281945 100644 --- a/contrib/gcppubsub/config/gcppubsub.yaml +++ b/contrib/gcppubsub/config/gcppubsub.yaml @@ -225,22 +225,3 @@ spec: protocol: TCP port: 80 targetPort: 8080 - ---- - -# Needed by the GCP PubSub Channel to communicate with GCP PubSub. - -apiVersion: networking.istio.io/v1alpha3 -kind: ServiceEntry -metadata: - name: gcppubsub-bus-ext - namespace: knative-eventing -spec: - hosts: - - "*.googleapis.com" - - "accounts.google.com" - ports: - - number: 443 - name: https - protocol: HTTPS - location: MESH_EXTERNAL diff --git a/pkg/reconciler/v1alpha1/trigger/trigger.go b/pkg/reconciler/v1alpha1/trigger/trigger.go index ff60d792c16..5a41c45ef56 100644 --- a/pkg/reconciler/v1alpha1/trigger/trigger.go +++ b/pkg/reconciler/v1alpha1/trigger/trigger.go @@ -28,7 +28,6 @@ import ( "github.com/knative/eventing/pkg/reconciler/v1alpha1/trigger/path" "github.com/knative/eventing/pkg/reconciler/v1alpha1/trigger/resources" "github.com/knative/eventing/pkg/utils/resolve" - istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3" "go.uber.org/zap" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/equality" @@ -95,7 +94,7 @@ func ProvideController(mgr manager.Manager, logger *zap.Logger) (controller.Cont } // Watch all the resources that the Trigger reconciles. 
- for _, t := range []runtime.Object{&corev1.Service{}, &istiov1alpha3.VirtualService{}, &v1alpha1.Subscription{}} { + for _, t := range []runtime.Object{&corev1.Service{}, &v1alpha1.Subscription{}} { err = c.Watch(&source.Kind{Type: t}, &handler.EnqueueRequestForOwner{OwnerType: &v1alpha1.Trigger{}, IsController: true}) if err != nil { return nil, err diff --git a/pkg/reconciler/v1alpha1/trigger/trigger_test.go b/pkg/reconciler/v1alpha1/trigger/trigger_test.go index e0e28920b68..690ca5105b9 100644 --- a/pkg/reconciler/v1alpha1/trigger/trigger_test.go +++ b/pkg/reconciler/v1alpha1/trigger/trigger_test.go @@ -31,7 +31,6 @@ import ( "github.com/knative/eventing/pkg/reconciler/v1alpha1/trigger/resources" "github.com/knative/eventing/pkg/utils" duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1" - istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3" "go.uber.org/zap" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -79,7 +78,6 @@ var ( func init() { // Add types to scheme _ = v1alpha1.AddToScheme(scheme.Scheme) - _ = istiov1alpha3.AddToScheme(scheme.Scheme) } func TestProvideController(t *testing.T) { From 338045e291ea7e2062bfe83553d002867270e495 Mon Sep 17 00:00:00 2001 From: akashrv Date: Fri, 19 Apr 2019 15:27:38 -0700 Subject: [PATCH 32/37] Revert kafka dispatcher change --- contrib/kafka/pkg/dispatcher/dispatcher.go | 6 ------ 1 file changed, 6 deletions(-) diff --git a/contrib/kafka/pkg/dispatcher/dispatcher.go b/contrib/kafka/pkg/dispatcher/dispatcher.go index c2009334935..718abd88584 100644 --- a/contrib/kafka/pkg/dispatcher/dispatcher.go +++ b/contrib/kafka/pkg/dispatcher/dispatcher.go @@ -144,12 +144,6 @@ func (d *KafkaDispatcher) UpdateConfig(config *multichannelfanout.Config) error // Update the atomic value. d.setHostToChannelMap(hcMap) - hcMap, err := createHostToChannelMap(config) - if err != nil { - return err - } - d.setHostToChannelMap(hcMap) - // Update the config so that it can be used for comparison during next sync d.setConfig(config) } From cd26f6f98b6bb86ddcfe7b97fdfdfe0a19e73ee7 Mon Sep 17 00:00:00 2001 From: akashrv Date: Fri, 19 Apr 2019 16:05:12 -0700 Subject: [PATCH 33/37] Removing Mutex. No need to use Mutex when using atomic value for hostToChannelMap --- contrib/natss/pkg/dispatcher/dispatcher/dispatcher.go | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/contrib/natss/pkg/dispatcher/dispatcher/dispatcher.go b/contrib/natss/pkg/dispatcher/dispatcher/dispatcher.go index 7ee52945226..1a0c2f7c358 100644 --- a/contrib/natss/pkg/dispatcher/dispatcher/dispatcher.go +++ b/contrib/natss/pkg/dispatcher/dispatcher/dispatcher.go @@ -63,8 +63,7 @@ type SubscriptionsSupervisor struct { natssConn *stan.Conn natssConnInProgress bool - hostToChannelMapMutex sync.Mutex - hostToChannelMap atomic.Value + hostToChannelMap atomic.Value } // NewDispatcher returns a new SubscriptionsSupervisor. @@ -314,11 +313,6 @@ func (s *SubscriptionsSupervisor) setHostToChannelMap(hcMap map[string]provision // It will update internal hostToChannelMap which is used to resolve the hostHeader of the // incoming request to the correct ChannelReference in the receiver function. func (s *SubscriptionsSupervisor) UpdateHostToChannelMap(ctx context.Context, chanList []eventingv1alpha1.Channel) error { - logging.FromContext(ctx).Info("UpdateHostToChannelMap: Acquiring mutex lock") - s.hostToChannelMapMutex.Lock() - defer s.hostToChannelMapMutex.Unlock() - logging.FromContext(ctx).Info("UpdateHostToChannelMap: Acquired mutex lock. 
Updating internal map") - hostToChanMap := make(map[string]provisioners.ChannelReference, len(chanList)) for _, c := range chanList { hostName := c.Status.Address.Hostname @@ -335,7 +329,7 @@ func (s *SubscriptionsSupervisor) UpdateHostToChannelMap(ctx context.Context, ch } s.setHostToChannelMap(hostToChanMap) - logging.FromContext(ctx).Info("UpdateHostToChannelMap: Update successful. Releasing mutex lock") + logging.FromContext(ctx).Info("hostToChannelMap updated successfully.") return nil } From d98920a8a26ac69cba2c377ac24f651483f567e2 Mon Sep 17 00:00:00 2001 From: Akash Verenkar Date: Mon, 22 Apr 2019 13:59:07 -0700 Subject: [PATCH 34/37] Removed named port from GCP dispatcher K8s service --- contrib/gcppubsub/config/gcppubsub.yaml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/contrib/gcppubsub/config/gcppubsub.yaml b/contrib/gcppubsub/config/gcppubsub.yaml index a8de1281945..33382e241bb 100644 --- a/contrib/gcppubsub/config/gcppubsub.yaml +++ b/contrib/gcppubsub/config/gcppubsub.yaml @@ -221,7 +221,6 @@ spec: clusterChannelProvisioner: gcp-pubsub role: dispatcher ports: - - name: http - protocol: TCP + - protocol: TCP port: 80 targetPort: 8080 From 8c47946794194182e567fa609b2a4ccfd3d31d00 Mon Sep 17 00:00:00 2001 From: Akash Verenkar Date: Thu, 25 Apr 2019 10:15:32 -0700 Subject: [PATCH 35/37] WIP --- ---clusterChannelProvisioners=kafka | 9 - --clusterChannelProvisioners=kafka | 7795 ----------------- contrib/gcppubsub/config/gcppubsub.yaml | 8 + .../pkg/controller/channel/reconcile.go | 6 +- .../pkg/controller/channel/reconcile_test.go | 10 +- contrib/gcppubsub/pkg/controller/cmd/main.go | 5 +- contrib/gcppubsub/pkg/dispatcher/cmd/main.go | 3 +- .../pkg/dispatcher/dispatcher/controller.go | 6 +- .../pkg/dispatcher/dispatcher/reconcile.go | 7 +- .../dispatcher/dispatcher/reconcile_test.go | 18 +- .../pkg/dispatcher/receiver/receiver.go | 7 +- .../pkg/dispatcher/receiver/receiver_test.go | 15 +- .../pkg/dispatcher/dispatcher/dispatcher.go | 17 +- 13 files changed, 38 insertions(+), 7868 deletions(-) delete mode 100644 ---clusterChannelProvisioners=kafka delete mode 100644 --clusterChannelProvisioners=kafka diff --git a/---clusterChannelProvisioners=kafka b/---clusterChannelProvisioners=kafka deleted file mode 100644 index b36fa87f0cb..00000000000 --- a/---clusterChannelProvisioners=kafka +++ /dev/null @@ -1,9 +0,0 @@ -$PROJECT_ID is set to 'akashv-public', using it to run the tests -================================= -==== SETTING UP TEST CLUSTER ==== -================================= -- Cluster is gke_akashv-public_us-west1-b_kn-dev-2 -- User is verenkar@google.com -- Docker is us.gcr.io/akashv-public ->> Publishing test images ->> Publishing test images diff --git a/--clusterChannelProvisioners=kafka b/--clusterChannelProvisioners=kafka deleted file mode 100644 index e2a1427291f..00000000000 --- a/--clusterChannelProvisioners=kafka +++ /dev/null @@ -1,7795 +0,0 @@ -$PROJECT_ID is set to 'akashv-public', using it to run the tests -================================= -==== SETTING UP TEST CLUSTER ==== -================================= -- Cluster is gke_akashv-public_us-west1-b_kn-dev-2 -- User is verenkar@google.com -- Docker is us.gcr.io/akashv-public ->> Publishing test images ->> Publishing test images -Tagging us.gcr.io/akashv-public/transformevents@sha256:b3f8a1d064f42f01573626a676f683243e2ce58beea985b384277c6c61fb200a with e2e -Tagging us.gcr.io/akashv-public/sendevent@sha256:3626b282b2ced720497bcb6ff057d2db08d024fdd2eeb9bf89f7ef2b3f3750bb with e2e -Tagging 
[The remaining deleted lines of this accidentally committed e2e test log (7,795 lines in total, per the hunk header above) are omitted here. The visible portion records tagging the test images, the run of 'go test -race -v -count=1 -tags=e2e -timeout=20m ./test/e2e -run ^TestMain$ -runFromMain=true -clusterChannelProvisioners=in-memory-channel', PASS results for TestMain (0.63s), TestMain/TestSingleStructuredEvent (80.80s), and TestMain/TestSingleBinaryEvent (89.22s), and verbose pod specs and resource cleanup output for each test, all of which this patch deletes along with the file.]
"/var/run/secrets/kubernetes.io/serviceaccount", - "name": "default-token-27q4q", - "readOnly": true - } - ] - } - ], - "dnsPolicy": "ClusterFirst", - "initContainers": [ - { - "args": [ - "-p", - "15001", - "-u", - "1337", - "-m", - "REDIRECT", - "-i", - "*", - "-x", - "", - "-b", - "", - "-d", - "15020" - ], - "image": "docker.io/istio/proxy_init:1.1.2", - "imagePullPolicy": "IfNotPresent", - "name": "istio-init", - "resources": { - "limits": { - "cpu": "100m", - "memory": "50Mi" - }, - "requests": { - "cpu": "10m", - "memory": "10Mi" - } - }, - "securityContext": { - "capabilities": { - "add": [ - "NET_ADMIN" - ] - }, - "procMount": "Default" - }, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File" - } - ], - "nodeName": "gke-kn-dev-2-default-pool-85a58e81-k5z3", - "priority": 0, - "restartPolicy": "Always", - "schedulerName": "default-scheduler", - "securityContext": {}, - "serviceAccount": "default", - "serviceAccountName": "default", - "terminationGracePeriodSeconds": 30, - "tolerations": [ - { - "effect": "NoExecute", - "key": "node.kubernetes.io/not-ready", - "operator": "Exists", - "tolerationSeconds": 300 - }, - { - "effect": "NoExecute", - "key": "node.kubernetes.io/unreachable", - "operator": "Exists", - "tolerationSeconds": 300 - } - ], - "volumes": [ - { - "name": "default-token-27q4q", - "secret": { - "defaultMode": 420, - "secretName": "default-token-27q4q" - } - }, - { - "emptyDir": { - "medium": "Memory" - }, - "name": "istio-envoy" - }, - { - "name": "istio-certs", - "secret": { - "defaultMode": 420, - "optional": true, - "secretName": "istio.default" - } - } - ] - }, - "status": { - "conditions": [ - { - "lastProbeTime": null, - "lastTransitionTime": "2019-04-18T22:41:30Z", - "status": "True", - "type": "Initialized" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-04-18T22:41:33Z", - "status": "True", - "type": "Ready" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-04-18T22:41:33Z", - "status": "True", - "type": "ContainersReady" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-04-18T22:41:26Z", - "status": "True", - "type": "PodScheduled" - } - ], - "containerStatuses": [ - { - "containerID": "docker://24d334e25b996c3490e518180182847a2a7aa5db5121192dba18c97878712754", - "image": "istio/proxyv2:1.1.2", - "imageID": "docker-pullable://istio/proxyv2@sha256:25ec45680ca9ef9bcba56d06d34c0a84ac0415966112e0a324924a61fb74a158", - "lastState": {}, - "name": "istio-proxy", - "ready": true, - "restartCount": 0, - "state": { - "running": { - "startedAt": "2019-04-18T22:41:30Z" - } - } - }, - { - "containerID": "docker://abf4403e34bfbd291a6c8f5d1b25e31f7281dca2efe87155be26b55765896607", - "image": "us.gcr.io/akashv-public/logevents:latest", - "imageID": "docker-pullable://us.gcr.io/akashv-public/logevents@sha256:94e704c729126cbe49c56bc2ab53cc4d1c7b322a3c1a7a8e46191c9a29984733", - "lastState": {}, - "name": "logevents", - "ready": true, - "restartCount": 0, - "state": { - "running": { - "startedAt": "2019-04-18T22:41:30Z" - } - } - } - ], - "hostIP": "10.138.0.50", - "initContainerStatuses": [ - { - "containerID": "docker://321664f1863754218f3b48cabea077304abe2f86ccf1e929df846e70a7a32742", - "image": "istio/proxy_init:1.1.2", - "imageID": "docker-pullable://istio/proxy_init@sha256:ae12e1cf004427d567ac3446f7a26c0e2b905ac1c7bf5a865b22e5a3749f81b5", - "lastState": {}, - "name": "istio-init", - "ready": true, - "restartCount": 0, - "state": { - "terminated": { - "containerID": 
"docker://321664f1863754218f3b48cabea077304abe2f86ccf1e929df846e70a7a32742", - "exitCode": 0, - "finishedAt": "2019-04-18T22:41:29Z", - "reason": "Completed", - "startedAt": "2019-04-18T22:41:28Z" - } - } - } - ], - "phase": "Running", - "podIP": "10.16.0.49", - "qosClass": "Burstable", - "startTime": "2019-04-18T22:41:26Z" - } - } - e2e.go:86: Waiting for e2e-singleevent-logger-pod-binary to be deleted - --- PASS: TestMain/TestChannelChain (90.31s) - e2e.go:86: Creating Namespace: testchannelchain-in-memory-channel - channel_chain_test.go:50: creating logger pod - e2e.go:86: Pod "e2e-channelchain-logger-pod" starts running - channel_chain_test.go:60: Creating Channel and Subscription - e2e.go:86: Sending fake CloudEvent - e2e.go:86: Creating event sender pod - e2e.go:86: Sender pod starts running - e2e.go:86: Cleaning resource: "e2e-channelchain-sender" - { - "apiVersion": "v1", - "kind": "Pod", - "metadata": { - "annotations": { - "sidecar.istio.io/inject": "true", - "sidecar.istio.io/status": "{\"version\":\"b159c540eed2383d13abb7a44e9519b1c51cf1b2fa469878e22a452dfa6b3ba9\",\"initContainers\":[\"istio-init\"],\"containers\":[\"istio-proxy\"],\"volumes\":[\"istio-envoy\",\"istio-certs\"],\"imagePullSecrets\":null}" - }, - "creationTimestamp": "2019-04-18T22:41:42Z", - "name": "e2e-channelchain-sender", - "namespace": "testchannelchain-in-memory-channel", - "resourceVersion": "2720767", - "selfLink": "/api/v1/namespaces/testchannelchain-in-memory-channel/pods/e2e-channelchain-sender", - "uid": "231ee9c8-622b-11e9-92f2-42010a8a0052" - }, - "spec": { - "containers": [ - { - "args": [ - "-event-id", - "", - "-event-type", - "dev.knative.test.event", - "-source", - "e2e-channelchain-sender", - "-data", - "{\"msg\":\"TestChannelChainEvent 2319c1f2-622b-11e9-8c26-acde48001122\"}", - "-encoding", - "binary", - "-sink", - "http://e2e-channelchain1-channel-w44lt.testchannelchain-in-memory-channel.svc.cluster.local" - ], - "image": "us.gcr.io/akashv-public/sendevent:latest", - "imagePullPolicy": "Always", - "name": "sendevent", - "resources": {}, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "volumeMounts": [ - { - "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", - "name": "default-token-k8k8f", - "readOnly": true - } - ] - }, - { - "args": [ - "proxy", - "sidecar", - "--domain", - "$(POD_NAMESPACE).svc.cluster.local", - "--configPath", - "/etc/istio/proxy", - "--binaryPath", - "/usr/local/bin/envoy", - "--serviceCluster", - "e2e-channelchain-sender.testchannelchain-in-memory-channel", - "--drainDuration", - "45s", - "--parentShutdownDuration", - "1m0s", - "--discoveryAddress", - "istio-pilot.istio-system:15010", - "--zipkinAddress", - "zipkin.istio-system:9411", - "--connectTimeout", - "10s", - "--proxyAdminPort", - "15000", - "--concurrency", - "2", - "--controlPlaneAuthPolicy", - "NONE", - "--statusPort", - "15020", - "--applicationPorts", - "" - ], - "env": [ - { - "name": "POD_NAME", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "metadata.name" - } - } - }, - { - "name": "POD_NAMESPACE", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "metadata.namespace" - } - } - }, - { - "name": "INSTANCE_IP", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "status.podIP" - } - } - }, - { - "name": "ISTIO_META_POD_NAME", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "metadata.name" - } - } - }, - { - "name": "ISTIO_META_CONFIG_NAMESPACE", - "valueFrom": { - 
"fieldRef": { - "apiVersion": "v1", - "fieldPath": "metadata.namespace" - } - } - }, - { - "name": "ISTIO_META_INTERCEPTION_MODE", - "value": "REDIRECT" - }, - { - "name": "ISTIO_METAJSON_ANNOTATIONS", - "value": "{\"sidecar.istio.io/inject\":\"true\"}\n" - } - ], - "image": "docker.io/istio/proxyv2:1.1.2", - "imagePullPolicy": "IfNotPresent", - "lifecycle": { - "preStop": { - "exec": { - "command": [ - "sleep", - "20" - ] - } - } - }, - "name": "istio-proxy", - "ports": [ - { - "containerPort": 15090, - "name": "http-envoy-prom", - "protocol": "TCP" - } - ], - "readinessProbe": { - "failureThreshold": 30, - "httpGet": { - "path": "/healthz/ready", - "port": 15020, - "scheme": "HTTP" - }, - "initialDelaySeconds": 1, - "periodSeconds": 2, - "successThreshold": 1, - "timeoutSeconds": 1 - }, - "resources": { - "limits": { - "cpu": "2", - "memory": "128Mi" - }, - "requests": { - "cpu": "100m", - "memory": "128Mi" - } - }, - "securityContext": { - "procMount": "Default", - "readOnlyRootFilesystem": true, - "runAsUser": 1337 - }, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "volumeMounts": [ - { - "mountPath": "/etc/istio/proxy", - "name": "istio-envoy" - }, - { - "mountPath": "/etc/certs/", - "name": "istio-certs", - "readOnly": true - }, - { - "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", - "name": "default-token-k8k8f", - "readOnly": true - } - ] - } - ], - "dnsPolicy": "ClusterFirst", - "initContainers": [ - { - "args": [ - "-p", - "15001", - "-u", - "1337", - "-m", - "REDIRECT", - "-i", - "*", - "-x", - "", - "-b", - "", - "-d", - "15020" - ], - "image": "docker.io/istio/proxy_init:1.1.2", - "imagePullPolicy": "IfNotPresent", - "name": "istio-init", - "resources": { - "limits": { - "cpu": "100m", - "memory": "50Mi" - }, - "requests": { - "cpu": "10m", - "memory": "10Mi" - } - }, - "securityContext": { - "capabilities": { - "add": [ - "NET_ADMIN" - ] - }, - "procMount": "Default" - }, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File" - } - ], - "nodeName": "gke-kn-dev-2-default-pool-85a58e81-k5z3", - "priority": 0, - "restartPolicy": "Never", - "schedulerName": "default-scheduler", - "securityContext": {}, - "serviceAccount": "default", - "serviceAccountName": "default", - "terminationGracePeriodSeconds": 30, - "tolerations": [ - { - "effect": "NoExecute", - "key": "node.kubernetes.io/not-ready", - "operator": "Exists", - "tolerationSeconds": 300 - }, - { - "effect": "NoExecute", - "key": "node.kubernetes.io/unreachable", - "operator": "Exists", - "tolerationSeconds": 300 - } - ], - "volumes": [ - { - "name": "default-token-k8k8f", - "secret": { - "defaultMode": 420, - "secretName": "default-token-k8k8f" - } - }, - { - "emptyDir": { - "medium": "Memory" - }, - "name": "istio-envoy" - }, - { - "name": "istio-certs", - "secret": { - "defaultMode": 420, - "optional": true, - "secretName": "istio.default" - } - } - ] - }, - "status": { - "conditions": [ - { - "lastProbeTime": null, - "lastTransitionTime": "2019-04-18T22:41:45Z", - "status": "True", - "type": "Initialized" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-04-18T22:41:48Z", - "status": "True", - "type": "Ready" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-04-18T22:41:48Z", - "status": "True", - "type": "ContainersReady" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-04-18T22:41:42Z", - "status": "True", - "type": "PodScheduled" - } - ], - "containerStatuses": [ - { - "containerID": 
"docker://ff5f206e74e9fd716fed3d2226900239224c14b4713c4758be17f8c9cf842d1d", - "image": "istio/proxyv2:1.1.2", - "imageID": "docker-pullable://istio/proxyv2@sha256:25ec45680ca9ef9bcba56d06d34c0a84ac0415966112e0a324924a61fb74a158", - "lastState": {}, - "name": "istio-proxy", - "ready": true, - "restartCount": 0, - "state": { - "running": { - "startedAt": "2019-04-18T22:41:46Z" - } - } - }, - { - "containerID": "docker://82fc291f9e34664dce6eff094cd87e64f1e9e0164f9d541e90313864ab87ae40", - "image": "us.gcr.io/akashv-public/sendevent:latest", - "imageID": "docker-pullable://us.gcr.io/akashv-public/sendevent@sha256:3626b282b2ced720497bcb6ff057d2db08d024fdd2eeb9bf89f7ef2b3f3750bb", - "lastState": {}, - "name": "sendevent", - "ready": true, - "restartCount": 0, - "state": { - "running": { - "startedAt": "2019-04-18T22:41:46Z" - } - } - } - ], - "hostIP": "10.138.0.50", - "initContainerStatuses": [ - { - "containerID": "docker://f3c1c470b30d725914b1b15ad8aa936aa38fff272e2d75451dbaab075301716d", - "image": "istio/proxy_init:1.1.2", - "imageID": "docker-pullable://istio/proxy_init@sha256:ae12e1cf004427d567ac3446f7a26c0e2b905ac1c7bf5a865b22e5a3749f81b5", - "lastState": {}, - "name": "istio-init", - "ready": true, - "restartCount": 0, - "state": { - "terminated": { - "containerID": "docker://f3c1c470b30d725914b1b15ad8aa936aa38fff272e2d75451dbaab075301716d", - "exitCode": 0, - "finishedAt": "2019-04-18T22:41:45Z", - "reason": "Completed", - "startedAt": "2019-04-18T22:41:44Z" - } - } - } - ], - "phase": "Running", - "podIP": "10.16.0.53", - "qosClass": "Burstable", - "startTime": "2019-04-18T22:41:42Z" - } - } - e2e.go:86: Waiting for e2e-channelchain-sender to be deleted - e2e.go:86: Cleaning resource: "e2e-channelchain-subs21" - { - "apiVersion": "eventing.knative.dev/v1alpha1", - "kind": "Subscription", - "metadata": { - "creationTimestamp": "2019-04-18T22:41:39Z", - "finalizers": [ - "subscription-controller" - ], - "generation": 1, - "name": "e2e-channelchain-subs21", - "namespace": "testchannelchain-in-memory-channel", - "resourceVersion": "2720649", - "selfLink": "/apis/eventing.knative.dev/v1alpha1/namespaces/testchannelchain-in-memory-channel/subscriptions/e2e-channelchain-subs21", - "uid": "211fe6ee-622b-11e9-92f2-42010a8a0052" - }, - "spec": { - "channel": { - "apiVersion": "eventing.knative.dev/v1alpha1", - "kind": "Channel", - "name": "e2e-channelchain2" - }, - "subscriber": { - "ref": { - "apiVersion": "v1", - "kind": "Service", - "name": "e2e-channelchain-logger-pod" - } - } - }, - "status": { - "conditions": [ - { - "lastTransitionTime": "2019-04-18T22:41:41Z", - "status": "True", - "type": "ChannelReady" - }, - { - "lastTransitionTime": "2019-04-18T22:41:41Z", - "status": "True", - "type": "Ready" - }, - { - "lastTransitionTime": "2019-04-18T22:41:41Z", - "status": "True", - "type": "Resolved" - } - ], - "physicalSubscription": { - "subscriberURI": "http://e2e-channelchain-logger-pod.testchannelchain-in-memory-channel.svc.cluster.local/" - } - } - } - e2e.go:86: Waiting for e2e-channelchain-subs21 to be deleted - e2e.go:86: Cleaning resource: "e2e-channelchain-subs12" - { - "apiVersion": "eventing.knative.dev/v1alpha1", - "kind": "Subscription", - "metadata": { - "creationTimestamp": "2019-04-18T22:41:38Z", - "finalizers": [ - "subscription-controller" - ], - "generation": 1, - "name": "e2e-channelchain-subs12", - "namespace": "testchannelchain-in-memory-channel", - "resourceVersion": "2720636", - "selfLink": 
"/apis/eventing.knative.dev/v1alpha1/namespaces/testchannelchain-in-memory-channel/subscriptions/e2e-channelchain-subs12", - "uid": "2112feae-622b-11e9-92f2-42010a8a0052" - }, - "spec": { - "channel": { - "apiVersion": "eventing.knative.dev/v1alpha1", - "kind": "Channel", - "name": "e2e-channelchain1" - }, - "reply": { - "channel": { - "apiVersion": "eventing.knative.dev/v1alpha1", - "kind": "Channel", - "name": "e2e-channelchain2" - } - } - }, - "status": { - "conditions": [ - { - "lastTransitionTime": "2019-04-18T22:41:40Z", - "status": "True", - "type": "ChannelReady" - }, - { - "lastTransitionTime": "2019-04-18T22:41:40Z", - "status": "True", - "type": "Ready" - }, - { - "lastTransitionTime": "2019-04-18T22:41:39Z", - "status": "True", - "type": "Resolved" - } - ], - "physicalSubscription": { - "replyURI": "http://e2e-channelchain2-channel-bf7nd.testchannelchain-in-memory-channel.svc.cluster.local/" - } - } - } - e2e.go:86: Waiting for e2e-channelchain-subs12 to be deleted - e2e.go:86: Cleaning resource: "e2e-channelchain-subs11" - { - "apiVersion": "eventing.knative.dev/v1alpha1", - "kind": "Subscription", - "metadata": { - "creationTimestamp": "2019-04-18T22:41:38Z", - "finalizers": [ - "subscription-controller" - ], - "generation": 1, - "name": "e2e-channelchain-subs11", - "namespace": "testchannelchain-in-memory-channel", - "resourceVersion": "2720633", - "selfLink": "/apis/eventing.knative.dev/v1alpha1/namespaces/testchannelchain-in-memory-channel/subscriptions/e2e-channelchain-subs11", - "uid": "210bdfc6-622b-11e9-92f2-42010a8a0052" - }, - "spec": { - "channel": { - "apiVersion": "eventing.knative.dev/v1alpha1", - "kind": "Channel", - "name": "e2e-channelchain1" - }, - "reply": { - "channel": { - "apiVersion": "eventing.knative.dev/v1alpha1", - "kind": "Channel", - "name": "e2e-channelchain2" - } - } - }, - "status": { - "conditions": [ - { - "lastTransitionTime": "2019-04-18T22:41:40Z", - "status": "True", - "type": "ChannelReady" - }, - { - "lastTransitionTime": "2019-04-18T22:41:40Z", - "status": "True", - "type": "Ready" - }, - { - "lastTransitionTime": "2019-04-18T22:41:39Z", - "status": "True", - "type": "Resolved" - } - ], - "physicalSubscription": { - "replyURI": "http://e2e-channelchain2-channel-bf7nd.testchannelchain-in-memory-channel.svc.cluster.local/" - } - } - } - e2e.go:86: Waiting for e2e-channelchain-subs11 to be deleted - e2e.go:86: Cleaning resource: "e2e-channelchain2" - { - "apiVersion": "eventing.knative.dev/v1alpha1", - "kind": "Channel", - "metadata": { - "creationTimestamp": "2019-04-18T22:41:37Z", - "generation": 3, - "name": "e2e-channelchain2", - "namespace": "testchannelchain-in-memory-channel", - "resourceVersion": "2720965", - "selfLink": "/apis/eventing.knative.dev/v1alpha1/namespaces/testchannelchain-in-memory-channel/channels/e2e-channelchain2", - "uid": "204eacdb-622b-11e9-92f2-42010a8a0052" - }, - "spec": { - "provisioner": { - "apiVersion": "eventing.knative.dev/v1alpha1", - "kind": "ClusterChannelProvisioner", - "name": "in-memory-channel" - }, - "subscribable": {} - }, - "status": { - "address": { - "hostname": "e2e-channelchain2-channel-bf7nd.testchannelchain-in-memory-channel.svc.cluster.local" - }, - "conditions": [ - { - "lastTransitionTime": "2019-04-18T22:41:38Z", - "status": "True", - "type": "Addressable" - }, - { - "lastTransitionTime": "2019-04-18T22:41:38Z", - "status": "True", - "type": "Provisioned" - }, - { - "lastTransitionTime": "2019-04-18T22:41:38Z", - "status": "True", - "type": "ProvisionerInstalled" - }, - { - 
"lastTransitionTime": "2019-04-18T22:41:38Z", - "status": "True", - "type": "Ready" - } - ] - } - } - e2e.go:86: Waiting for e2e-channelchain2 to be deleted - e2e.go:86: Cleaning resource: "e2e-channelchain1" - { - "apiVersion": "eventing.knative.dev/v1alpha1", - "kind": "Channel", - "metadata": { - "creationTimestamp": "2019-04-18T22:41:37Z", - "generation": 7, - "name": "e2e-channelchain1", - "namespace": "testchannelchain-in-memory-channel", - "resourceVersion": "2720979", - "selfLink": "/apis/eventing.knative.dev/v1alpha1/namespaces/testchannelchain-in-memory-channel/channels/e2e-channelchain1", - "uid": "2045fbeb-622b-11e9-92f2-42010a8a0052" - }, - "spec": { - "provisioner": { - "apiVersion": "eventing.knative.dev/v1alpha1", - "kind": "ClusterChannelProvisioner", - "name": "in-memory-channel" - }, - "subscribable": {} - }, - "status": { - "address": { - "hostname": "e2e-channelchain1-channel-w44lt.testchannelchain-in-memory-channel.svc.cluster.local" - }, - "conditions": [ - { - "lastTransitionTime": "2019-04-18T22:41:38Z", - "status": "True", - "type": "Addressable" - }, - { - "lastTransitionTime": "2019-04-18T22:41:38Z", - "status": "True", - "type": "Provisioned" - }, - { - "lastTransitionTime": "2019-04-18T22:41:38Z", - "status": "True", - "type": "ProvisionerInstalled" - }, - { - "lastTransitionTime": "2019-04-18T22:41:38Z", - "status": "True", - "type": "Ready" - } - ] - } - } - e2e.go:86: Waiting for e2e-channelchain1 to be deleted - e2e.go:86: Cleaning resource: "e2e-channelchain-logger-pod" - { - "apiVersion": "v1", - "kind": "Service", - "metadata": { - "creationTimestamp": "2019-04-18T22:41:37Z", - "name": "e2e-channelchain-logger-pod", - "namespace": "testchannelchain-in-memory-channel", - "resourceVersion": "2720578", - "selfLink": "/api/v1/namespaces/testchannelchain-in-memory-channel/services/e2e-channelchain-logger-pod", - "uid": "20392e54-622b-11e9-92f2-42010a8a0052" - }, - "spec": { - "clusterIP": "10.19.247.203", - "ports": [ - { - "name": "http", - "port": 80, - "protocol": "TCP", - "targetPort": 8080 - } - ], - "selector": { - "e2etest": "1977bfa0-622b-11e9-8c26-acde48001122" - }, - "sessionAffinity": "None", - "type": "ClusterIP" - }, - "status": { - "loadBalancer": {} - } - } - e2e.go:86: Waiting for e2e-channelchain-logger-pod to be deleted - e2e.go:86: Cleaning resource: "e2e-channelchain-logger-pod" - { - "apiVersion": "v1", - "kind": "Pod", - "metadata": { - "annotations": { - "sidecar.istio.io/inject": "true", - "sidecar.istio.io/status": "{\"version\":\"b159c540eed2383d13abb7a44e9519b1c51cf1b2fa469878e22a452dfa6b3ba9\",\"initContainers\":[\"istio-init\"],\"containers\":[\"istio-proxy\"],\"volumes\":[\"istio-envoy\",\"istio-certs\"],\"imagePullSecrets\":null}" - }, - "creationTimestamp": "2019-04-18T22:41:26Z", - "labels": { - "e2etest": "1977bfa0-622b-11e9-8c26-acde48001122" - }, - "name": "e2e-channelchain-logger-pod", - "namespace": "testchannelchain-in-memory-channel", - "resourceVersion": "2720574", - "selfLink": "/api/v1/namespaces/testchannelchain-in-memory-channel/pods/e2e-channelchain-logger-pod", - "uid": "1984c095-622b-11e9-92f2-42010a8a0052" - }, - "spec": { - "containers": [ - { - "image": "us.gcr.io/akashv-public/logevents:latest", - "imagePullPolicy": "Always", - "name": "logevents", - "resources": {}, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "volumeMounts": [ - { - "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", - "name": "default-token-k8k8f", - "readOnly": true - } - ] - }, - 
{ - "args": [ - "proxy", - "sidecar", - "--domain", - "$(POD_NAMESPACE).svc.cluster.local", - "--configPath", - "/etc/istio/proxy", - "--binaryPath", - "/usr/local/bin/envoy", - "--serviceCluster", - "e2e-channelchain-logger-pod.testchannelchain-in-memory-channel", - "--drainDuration", - "45s", - "--parentShutdownDuration", - "1m0s", - "--discoveryAddress", - "istio-pilot.istio-system:15010", - "--zipkinAddress", - "zipkin.istio-system:9411", - "--connectTimeout", - "10s", - "--proxyAdminPort", - "15000", - "--concurrency", - "2", - "--controlPlaneAuthPolicy", - "NONE", - "--statusPort", - "15020", - "--applicationPorts", - "" - ], - "env": [ - { - "name": "POD_NAME", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "metadata.name" - } - } - }, - { - "name": "POD_NAMESPACE", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "metadata.namespace" - } - } - }, - { - "name": "INSTANCE_IP", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "status.podIP" - } - } - }, - { - "name": "ISTIO_META_POD_NAME", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "metadata.name" - } - } - }, - { - "name": "ISTIO_META_CONFIG_NAMESPACE", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "metadata.namespace" - } - } - }, - { - "name": "ISTIO_META_INTERCEPTION_MODE", - "value": "REDIRECT" - }, - { - "name": "ISTIO_METAJSON_ANNOTATIONS", - "value": "{\"sidecar.istio.io/inject\":\"true\"}\n" - }, - { - "name": "ISTIO_METAJSON_LABELS", - "value": "{\"e2etest\":\"1977bfa0-622b-11e9-8c26-acde48001122\"}\n" - } - ], - "image": "docker.io/istio/proxyv2:1.1.2", - "imagePullPolicy": "IfNotPresent", - "lifecycle": { - "preStop": { - "exec": { - "command": [ - "sleep", - "20" - ] - } - } - }, - "name": "istio-proxy", - "ports": [ - { - "containerPort": 15090, - "name": "http-envoy-prom", - "protocol": "TCP" - } - ], - "readinessProbe": { - "failureThreshold": 30, - "httpGet": { - "path": "/healthz/ready", - "port": 15020, - "scheme": "HTTP" - }, - "initialDelaySeconds": 1, - "periodSeconds": 2, - "successThreshold": 1, - "timeoutSeconds": 1 - }, - "resources": { - "limits": { - "cpu": "2", - "memory": "128Mi" - }, - "requests": { - "cpu": "100m", - "memory": "128Mi" - } - }, - "securityContext": { - "procMount": "Default", - "readOnlyRootFilesystem": true, - "runAsUser": 1337 - }, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "volumeMounts": [ - { - "mountPath": "/etc/istio/proxy", - "name": "istio-envoy" - }, - { - "mountPath": "/etc/certs/", - "name": "istio-certs", - "readOnly": true - }, - { - "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", - "name": "default-token-k8k8f", - "readOnly": true - } - ] - } - ], - "dnsPolicy": "ClusterFirst", - "initContainers": [ - { - "args": [ - "-p", - "15001", - "-u", - "1337", - "-m", - "REDIRECT", - "-i", - "*", - "-x", - "", - "-b", - "", - "-d", - "15020" - ], - "image": "docker.io/istio/proxy_init:1.1.2", - "imagePullPolicy": "IfNotPresent", - "name": "istio-init", - "resources": { - "limits": { - "cpu": "100m", - "memory": "50Mi" - }, - "requests": { - "cpu": "10m", - "memory": "10Mi" - } - }, - "securityContext": { - "capabilities": { - "add": [ - "NET_ADMIN" - ] - }, - "procMount": "Default" - }, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File" - } - ], - "nodeName": "gke-kn-dev-2-default-pool-85a58e81-k5z3", - "priority": 0, - "restartPolicy": "Always", - "schedulerName": 
"default-scheduler", - "securityContext": {}, - "serviceAccount": "default", - "serviceAccountName": "default", - "terminationGracePeriodSeconds": 30, - "tolerations": [ - { - "effect": "NoExecute", - "key": "node.kubernetes.io/not-ready", - "operator": "Exists", - "tolerationSeconds": 300 - }, - { - "effect": "NoExecute", - "key": "node.kubernetes.io/unreachable", - "operator": "Exists", - "tolerationSeconds": 300 - } - ], - "volumes": [ - { - "name": "default-token-k8k8f", - "secret": { - "defaultMode": 420, - "secretName": "default-token-k8k8f" - } - }, - { - "emptyDir": { - "medium": "Memory" - }, - "name": "istio-envoy" - }, - { - "name": "istio-certs", - "secret": { - "defaultMode": 420, - "optional": true, - "secretName": "istio.default" - } - } - ] - }, - "status": { - "conditions": [ - { - "lastProbeTime": null, - "lastTransitionTime": "2019-04-18T22:41:29Z", - "status": "True", - "type": "Initialized" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-04-18T22:41:32Z", - "status": "True", - "type": "Ready" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-04-18T22:41:32Z", - "status": "True", - "type": "ContainersReady" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-04-18T22:41:26Z", - "status": "True", - "type": "PodScheduled" - } - ], - "containerStatuses": [ - { - "containerID": "docker://d2484584e70a252fd8bed0a47836ff778b06f2ff9f0ace314f4aa4e6f2fe387c", - "image": "istio/proxyv2:1.1.2", - "imageID": "docker-pullable://istio/proxyv2@sha256:25ec45680ca9ef9bcba56d06d34c0a84ac0415966112e0a324924a61fb74a158", - "lastState": {}, - "name": "istio-proxy", - "ready": true, - "restartCount": 0, - "state": { - "running": { - "startedAt": "2019-04-18T22:41:29Z" - } - } - }, - { - "containerID": "docker://e74f2e77cf14d135d44c7b32d5acae1dd0d5aa41d2f725114c58d19e932b7832", - "image": "us.gcr.io/akashv-public/logevents:latest", - "imageID": "docker-pullable://us.gcr.io/akashv-public/logevents@sha256:94e704c729126cbe49c56bc2ab53cc4d1c7b322a3c1a7a8e46191c9a29984733", - "lastState": {}, - "name": "logevents", - "ready": true, - "restartCount": 0, - "state": { - "running": { - "startedAt": "2019-04-18T22:41:29Z" - } - } - } - ], - "hostIP": "10.138.0.50", - "initContainerStatuses": [ - { - "containerID": "docker://59eea458f9a88c3efdd3386ef8b4867a0fa82c6c341070648366fff43429fd68", - "image": "istio/proxy_init:1.1.2", - "imageID": "docker-pullable://istio/proxy_init@sha256:ae12e1cf004427d567ac3446f7a26c0e2b905ac1c7bf5a865b22e5a3749f81b5", - "lastState": {}, - "name": "istio-init", - "ready": true, - "restartCount": 0, - "state": { - "terminated": { - "containerID": "docker://59eea458f9a88c3efdd3386ef8b4867a0fa82c6c341070648366fff43429fd68", - "exitCode": 0, - "finishedAt": "2019-04-18T22:41:28Z", - "reason": "Completed", - "startedAt": "2019-04-18T22:41:27Z" - } - } - } - ], - "phase": "Running", - "podIP": "10.16.0.48", - "qosClass": "Burstable", - "startTime": "2019-04-18T22:41:26Z" - } - } - e2e.go:86: Waiting for e2e-channelchain-logger-pod to be deleted - --- PASS: TestMain/TestEventTransformation (130.43s) - e2e.go:86: Creating Namespace: testeventtransformation-in-memory-channel - event_transformation_test.go:56: creating subscriber pods - e2e.go:86: Pod "e2e-eventtransformation-transformation-pod" starts running - e2e.go:86: Pod "e2e-eventtransformation-logger-pod" starts running - event_transformation_test.go:79: Creating Channel and Subscription - e2e.go:86: Sending fake CloudEvent - e2e.go:86: Creating event sender pod - e2e.go:86: Sender pod 
starts running - e2e.go:86: Cleaning resource: "e2e-eventtransformation-sender" - { - "apiVersion": "v1", - "kind": "Pod", - "metadata": { - "annotations": { - "sidecar.istio.io/inject": "true", - "sidecar.istio.io/status": "{\"version\":\"b159c540eed2383d13abb7a44e9519b1c51cf1b2fa469878e22a452dfa6b3ba9\",\"initContainers\":[\"istio-init\"],\"containers\":[\"istio-proxy\"],\"volumes\":[\"istio-envoy\",\"istio-certs\"],\"imagePullSecrets\":null}" - }, - "creationTimestamp": "2019-04-18T22:41:53Z", - "name": "e2e-eventtransformation-sender", - "namespace": "testeventtransformation-in-memory-channel", - "resourceVersion": "2720848", - "selfLink": "/api/v1/namespaces/testeventtransformation-in-memory-channel/pods/e2e-eventtransformation-sender", - "uid": "2a08e992-622b-11e9-92f2-42010a8a0052" - }, - "spec": { - "containers": [ - { - "args": [ - "-event-id", - "", - "-event-type", - "dev.knative.test.event", - "-source", - "e2e-eventtransformation-sender", - "-data", - "{\"msg\":\"TestEventTransformation 2a049e60-622b-11e9-8c26-acde48001122\"}", - "-encoding", - "binary", - "-sink", - "http://e2e-eventtransformation1-channel-xrbs8.testeventtransformation-in-memory-channel.svc.cluster.local" - ], - "image": "us.gcr.io/akashv-public/sendevent:latest", - "imagePullPolicy": "Always", - "name": "sendevent", - "resources": {}, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "volumeMounts": [ - { - "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", - "name": "default-token-72bts", - "readOnly": true - } - ] - }, - { - "args": [ - "proxy", - "sidecar", - "--domain", - "$(POD_NAMESPACE).svc.cluster.local", - "--configPath", - "/etc/istio/proxy", - "--binaryPath", - "/usr/local/bin/envoy", - "--serviceCluster", - "e2e-eventtransformation-sender.testeventtransformation-in-memory-channel", - "--drainDuration", - "45s", - "--parentShutdownDuration", - "1m0s", - "--discoveryAddress", - "istio-pilot.istio-system:15010", - "--zipkinAddress", - "zipkin.istio-system:9411", - "--connectTimeout", - "10s", - "--proxyAdminPort", - "15000", - "--concurrency", - "2", - "--controlPlaneAuthPolicy", - "NONE", - "--statusPort", - "15020", - "--applicationPorts", - "" - ], - "env": [ - { - "name": "POD_NAME", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "metadata.name" - } - } - }, - { - "name": "POD_NAMESPACE", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "metadata.namespace" - } - } - }, - { - "name": "INSTANCE_IP", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "status.podIP" - } - } - }, - { - "name": "ISTIO_META_POD_NAME", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "metadata.name" - } - } - }, - { - "name": "ISTIO_META_CONFIG_NAMESPACE", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "metadata.namespace" - } - } - }, - { - "name": "ISTIO_META_INTERCEPTION_MODE", - "value": "REDIRECT" - }, - { - "name": "ISTIO_METAJSON_ANNOTATIONS", - "value": "{\"sidecar.istio.io/inject\":\"true\"}\n" - } - ], - "image": "docker.io/istio/proxyv2:1.1.2", - "imagePullPolicy": "IfNotPresent", - "lifecycle": { - "preStop": { - "exec": { - "command": [ - "sleep", - "20" - ] - } - } - }, - "name": "istio-proxy", - "ports": [ - { - "containerPort": 15090, - "name": "http-envoy-prom", - "protocol": "TCP" - } - ], - "readinessProbe": { - "failureThreshold": 30, - "httpGet": { - "path": "/healthz/ready", - "port": 15020, - "scheme": "HTTP" - }, - "initialDelaySeconds": 
1, - "periodSeconds": 2, - "successThreshold": 1, - "timeoutSeconds": 1 - }, - "resources": { - "limits": { - "cpu": "2", - "memory": "128Mi" - }, - "requests": { - "cpu": "100m", - "memory": "128Mi" - } - }, - "securityContext": { - "procMount": "Default", - "readOnlyRootFilesystem": true, - "runAsUser": 1337 - }, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "volumeMounts": [ - { - "mountPath": "/etc/istio/proxy", - "name": "istio-envoy" - }, - { - "mountPath": "/etc/certs/", - "name": "istio-certs", - "readOnly": true - }, - { - "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", - "name": "default-token-72bts", - "readOnly": true - } - ] - } - ], - "dnsPolicy": "ClusterFirst", - "initContainers": [ - { - "args": [ - "-p", - "15001", - "-u", - "1337", - "-m", - "REDIRECT", - "-i", - "*", - "-x", - "", - "-b", - "", - "-d", - "15020" - ], - "image": "docker.io/istio/proxy_init:1.1.2", - "imagePullPolicy": "IfNotPresent", - "name": "istio-init", - "resources": { - "limits": { - "cpu": "100m", - "memory": "50Mi" - }, - "requests": { - "cpu": "10m", - "memory": "10Mi" - } - }, - "securityContext": { - "capabilities": { - "add": [ - "NET_ADMIN" - ] - }, - "procMount": "Default" - }, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File" - } - ], - "nodeName": "gke-kn-dev-2-default-pool-85a58e81-vx0d", - "priority": 0, - "restartPolicy": "Never", - "schedulerName": "default-scheduler", - "securityContext": {}, - "serviceAccount": "default", - "serviceAccountName": "default", - "terminationGracePeriodSeconds": 30, - "tolerations": [ - { - "effect": "NoExecute", - "key": "node.kubernetes.io/not-ready", - "operator": "Exists", - "tolerationSeconds": 300 - }, - { - "effect": "NoExecute", - "key": "node.kubernetes.io/unreachable", - "operator": "Exists", - "tolerationSeconds": 300 - } - ], - "volumes": [ - { - "name": "default-token-72bts", - "secret": { - "defaultMode": 420, - "secretName": "default-token-72bts" - } - }, - { - "emptyDir": { - "medium": "Memory" - }, - "name": "istio-envoy" - }, - { - "name": "istio-certs", - "secret": { - "defaultMode": 420, - "optional": true, - "secretName": "istio.default" - } - } - ] - }, - "status": { - "conditions": [ - { - "lastProbeTime": null, - "lastTransitionTime": "2019-04-18T22:41:57Z", - "status": "True", - "type": "Initialized" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-04-18T22:41:58Z", - "status": "True", - "type": "Ready" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-04-18T22:41:58Z", - "status": "True", - "type": "ContainersReady" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-04-18T22:41:54Z", - "status": "True", - "type": "PodScheduled" - } - ], - "containerStatuses": [ - { - "containerID": "docker://308e469570c2415d1fb6467178d8e11f8708687fe29b5879115381af066bca37", - "image": "istio/proxyv2:1.1.2", - "imageID": "docker-pullable://istio/proxyv2@sha256:25ec45680ca9ef9bcba56d06d34c0a84ac0415966112e0a324924a61fb74a158", - "lastState": {}, - "name": "istio-proxy", - "ready": true, - "restartCount": 0, - "state": { - "running": { - "startedAt": "2019-04-18T22:41:57Z" - } - } - }, - { - "containerID": "docker://937bcbc85d67f26e45610ef12007d8dee2f95356d404188cf75d449baa210738", - "image": "us.gcr.io/akashv-public/sendevent:latest", - "imageID": "docker-pullable://us.gcr.io/akashv-public/sendevent@sha256:3626b282b2ced720497bcb6ff057d2db08d024fdd2eeb9bf89f7ef2b3f3750bb", - "lastState": {}, - "name": 
"sendevent", - "ready": true, - "restartCount": 0, - "state": { - "running": { - "startedAt": "2019-04-18T22:41:57Z" - } - } - } - ], - "hostIP": "10.138.0.48", - "initContainerStatuses": [ - { - "containerID": "docker://8f236fdb2d62a969c917140ae30b2712ccbd20740c5e2467f530db726abdd376", - "image": "istio/proxy_init:1.1.2", - "imageID": "docker-pullable://istio/proxy_init@sha256:ae12e1cf004427d567ac3446f7a26c0e2b905ac1c7bf5a865b22e5a3749f81b5", - "lastState": {}, - "name": "istio-init", - "ready": true, - "restartCount": 0, - "state": { - "terminated": { - "containerID": "docker://8f236fdb2d62a969c917140ae30b2712ccbd20740c5e2467f530db726abdd376", - "exitCode": 0, - "finishedAt": "2019-04-18T22:41:56Z", - "reason": "Completed", - "startedAt": "2019-04-18T22:41:55Z" - } - } - } - ], - "phase": "Running", - "podIP": "10.16.2.228", - "qosClass": "Burstable", - "startTime": "2019-04-18T22:41:54Z" - } - } - e2e.go:86: Waiting for e2e-eventtransformation-sender to be deleted - e2e.go:86: Cleaning resource: "e2e-eventtransformation-subs22" - { - "apiVersion": "eventing.knative.dev/v1alpha1", - "kind": "Subscription", - "metadata": { - "creationTimestamp": "2019-04-18T22:41:46Z", - "finalizers": [ - "subscription-controller" - ], - "generation": 1, - "name": "e2e-eventtransformation-subs22", - "namespace": "testeventtransformation-in-memory-channel", - "resourceVersion": "2720801", - "selfLink": "/apis/eventing.knative.dev/v1alpha1/namespaces/testeventtransformation-in-memory-channel/subscriptions/e2e-eventtransformation-subs22", - "uid": "259eee61-622b-11e9-92f2-42010a8a0052" - }, - "spec": { - "channel": { - "apiVersion": "eventing.knative.dev/v1alpha1", - "kind": "Channel", - "name": "e2e-eventtransformation2" - }, - "subscriber": { - "ref": { - "apiVersion": "v1", - "kind": "Service", - "name": "e2e-eventtransformation-logger-pod" - } - } - }, - "status": { - "conditions": [ - { - "lastTransitionTime": "2019-04-18T22:41:53Z", - "status": "True", - "type": "ChannelReady" - }, - { - "lastTransitionTime": "2019-04-18T22:41:53Z", - "status": "True", - "type": "Ready" - }, - { - "lastTransitionTime": "2019-04-18T22:41:52Z", - "status": "True", - "type": "Resolved" - } - ], - "physicalSubscription": { - "subscriberURI": "http://e2e-eventtransformation-logger-pod.testeventtransformation-in-memory-channel.svc.cluster.local/" - } - } - } - e2e.go:86: Waiting for e2e-eventtransformation-subs22 to be deleted - e2e.go:86: Cleaning resource: "e2e-eventtransformation-subs21" - { - "apiVersion": "eventing.knative.dev/v1alpha1", - "kind": "Subscription", - "metadata": { - "creationTimestamp": "2019-04-18T22:41:46Z", - "finalizers": [ - "subscription-controller" - ], - "generation": 1, - "name": "e2e-eventtransformation-subs21", - "namespace": "testeventtransformation-in-memory-channel", - "resourceVersion": "2720793", - "selfLink": "/apis/eventing.knative.dev/v1alpha1/namespaces/testeventtransformation-in-memory-channel/subscriptions/e2e-eventtransformation-subs21", - "uid": "2599c7e4-622b-11e9-92f2-42010a8a0052" - }, - "spec": { - "channel": { - "apiVersion": "eventing.knative.dev/v1alpha1", - "kind": "Channel", - "name": "e2e-eventtransformation2" - }, - "subscriber": { - "ref": { - "apiVersion": "v1", - "kind": "Service", - "name": "e2e-eventtransformation-logger-pod" - } - } - }, - "status": { - "conditions": [ - { - "lastTransitionTime": "2019-04-18T22:41:52Z", - "status": "True", - "type": "ChannelReady" - }, - { - "lastTransitionTime": "2019-04-18T22:41:52Z", - "status": "True", - "type": "Ready" - }, - 
{ - "lastTransitionTime": "2019-04-18T22:41:51Z", - "status": "True", - "type": "Resolved" - } - ], - "physicalSubscription": { - "subscriberURI": "http://e2e-eventtransformation-logger-pod.testeventtransformation-in-memory-channel.svc.cluster.local/" - } - } - } - e2e.go:86: Waiting for e2e-eventtransformation-subs21 to be deleted - e2e.go:86: Cleaning resource: "e2e-eventtransformation-subs12" - { - "apiVersion": "eventing.knative.dev/v1alpha1", - "kind": "Subscription", - "metadata": { - "creationTimestamp": "2019-04-18T22:41:46Z", - "finalizers": [ - "subscription-controller" - ], - "generation": 1, - "name": "e2e-eventtransformation-subs12", - "namespace": "testeventtransformation-in-memory-channel", - "resourceVersion": "2720791", - "selfLink": "/apis/eventing.knative.dev/v1alpha1/namespaces/testeventtransformation-in-memory-channel/subscriptions/e2e-eventtransformation-subs12", - "uid": "258e2a05-622b-11e9-92f2-42010a8a0052" - }, - "spec": { - "channel": { - "apiVersion": "eventing.knative.dev/v1alpha1", - "kind": "Channel", - "name": "e2e-eventtransformation1" - }, - "reply": { - "channel": { - "apiVersion": "eventing.knative.dev/v1alpha1", - "kind": "Channel", - "name": "e2e-eventtransformation2" - } - }, - "subscriber": { - "ref": { - "apiVersion": "v1", - "kind": "Service", - "name": "e2e-eventtransformation-transformation-pod" - } - } - }, - "status": { - "conditions": [ - { - "lastTransitionTime": "2019-04-18T22:41:51Z", - "status": "True", - "type": "ChannelReady" - }, - { - "lastTransitionTime": "2019-04-18T22:41:51Z", - "status": "True", - "type": "Ready" - }, - { - "lastTransitionTime": "2019-04-18T22:41:51Z", - "status": "True", - "type": "Resolved" - } - ], - "physicalSubscription": { - "replyURI": "http://e2e-eventtransformation2-channel-qwd72.testeventtransformation-in-memory-channel.svc.cluster.local/", - "subscriberURI": "http://e2e-eventtransformation-transformation-pod.testeventtransformation-in-memory-channel.svc.cluster.local/" - } - } - } - e2e.go:86: Waiting for e2e-eventtransformation-subs12 to be deleted - e2e.go:86: Cleaning resource: "e2e-eventtransformation-subs11" - { - "apiVersion": "eventing.knative.dev/v1alpha1", - "kind": "Subscription", - "metadata": { - "creationTimestamp": "2019-04-18T22:41:46Z", - "finalizers": [ - "subscription-controller" - ], - "generation": 1, - "name": "e2e-eventtransformation-subs11", - "namespace": "testeventtransformation-in-memory-channel", - "resourceVersion": "2720780", - "selfLink": "/apis/eventing.knative.dev/v1alpha1/namespaces/testeventtransformation-in-memory-channel/subscriptions/e2e-eventtransformation-subs11", - "uid": "2588570d-622b-11e9-92f2-42010a8a0052" - }, - "spec": { - "channel": { - "apiVersion": "eventing.knative.dev/v1alpha1", - "kind": "Channel", - "name": "e2e-eventtransformation1" - }, - "reply": { - "channel": { - "apiVersion": "eventing.knative.dev/v1alpha1", - "kind": "Channel", - "name": "e2e-eventtransformation2" - } - }, - "subscriber": { - "ref": { - "apiVersion": "v1", - "kind": "Service", - "name": "e2e-eventtransformation-transformation-pod" - } - } - }, - "status": { - "conditions": [ - { - "lastTransitionTime": "2019-04-18T22:41:50Z", - "status": "True", - "type": "ChannelReady" - }, - { - "lastTransitionTime": "2019-04-18T22:41:50Z", - "status": "True", - "type": "Ready" - }, - { - "lastTransitionTime": "2019-04-18T22:41:49Z", - "status": "True", - "type": "Resolved" - } - ], - "physicalSubscription": { - "replyURI": 
"http://e2e-eventtransformation2-channel-qwd72.testeventtransformation-in-memory-channel.svc.cluster.local/", - "subscriberURI": "http://e2e-eventtransformation-transformation-pod.testeventtransformation-in-memory-channel.svc.cluster.local/" - } - } - } - e2e.go:86: Waiting for e2e-eventtransformation-subs11 to be deleted - e2e.go:86: Cleaning resource: "e2e-eventtransformation2" - { - "apiVersion": "eventing.knative.dev/v1alpha1", - "kind": "Channel", - "metadata": { - "creationTimestamp": "2019-04-18T22:41:45Z", - "generation": 6, - "name": "e2e-eventtransformation2", - "namespace": "testeventtransformation-in-memory-channel", - "resourceVersion": "2721072", - "selfLink": "/apis/eventing.knative.dev/v1alpha1/namespaces/testeventtransformation-in-memory-channel/channels/e2e-eventtransformation2", - "uid": "24be1b1a-622b-11e9-92f2-42010a8a0052" - }, - "spec": { - "provisioner": { - "apiVersion": "eventing.knative.dev/v1alpha1", - "kind": "ClusterChannelProvisioner", - "name": "in-memory-channel" - }, - "subscribable": {} - }, - "status": { - "address": { - "hostname": "e2e-eventtransformation2-channel-qwd72.testeventtransformation-in-memory-channel.svc.cluster.local" - }, - "conditions": [ - { - "lastTransitionTime": "2019-04-18T22:41:46Z", - "status": "True", - "type": "Addressable" - }, - { - "lastTransitionTime": "2019-04-18T22:41:46Z", - "status": "True", - "type": "Provisioned" - }, - { - "lastTransitionTime": "2019-04-18T22:41:46Z", - "status": "True", - "type": "ProvisionerInstalled" - }, - { - "lastTransitionTime": "2019-04-18T22:41:46Z", - "status": "True", - "type": "Ready" - } - ] - } - } - e2e.go:86: Waiting for e2e-eventtransformation2 to be deleted - e2e.go:86: Cleaning resource: "e2e-eventtransformation1" - { - "apiVersion": "eventing.knative.dev/v1alpha1", - "kind": "Channel", - "metadata": { - "creationTimestamp": "2019-04-18T22:41:45Z", - "generation": 6, - "name": "e2e-eventtransformation1", - "namespace": "testeventtransformation-in-memory-channel", - "resourceVersion": "2721082", - "selfLink": "/apis/eventing.knative.dev/v1alpha1/namespaces/testeventtransformation-in-memory-channel/channels/e2e-eventtransformation1", - "uid": "24b2f58a-622b-11e9-92f2-42010a8a0052" - }, - "spec": { - "provisioner": { - "apiVersion": "eventing.knative.dev/v1alpha1", - "kind": "ClusterChannelProvisioner", - "name": "in-memory-channel" - }, - "subscribable": {} - }, - "status": { - "address": { - "hostname": "e2e-eventtransformation1-channel-xrbs8.testeventtransformation-in-memory-channel.svc.cluster.local" - }, - "conditions": [ - { - "lastTransitionTime": "2019-04-18T22:41:46Z", - "status": "True", - "type": "Addressable" - }, - { - "lastTransitionTime": "2019-04-18T22:41:46Z", - "status": "True", - "type": "Provisioned" - }, - { - "lastTransitionTime": "2019-04-18T22:41:46Z", - "status": "True", - "type": "ProvisionerInstalled" - }, - { - "lastTransitionTime": "2019-04-18T22:41:46Z", - "status": "True", - "type": "Ready" - } - ] - } - } - e2e.go:86: Waiting for e2e-eventtransformation1 to be deleted - e2e.go:86: Cleaning resource: "e2e-eventtransformation-logger-pod" - { - "apiVersion": "v1", - "kind": "Service", - "metadata": { - "creationTimestamp": "2019-04-18T22:41:44Z", - "name": "e2e-eventtransformation-logger-pod", - "namespace": "testeventtransformation-in-memory-channel", - "resourceVersion": "2720713", - "selfLink": "/api/v1/namespaces/testeventtransformation-in-memory-channel/services/e2e-eventtransformation-logger-pod", - "uid": "24961f45-622b-11e9-92f2-42010a8a0052" - }, - 
"spec": { - "clusterIP": "10.19.249.123", - "ports": [ - { - "name": "http", - "port": 80, - "protocol": "TCP", - "targetPort": 8080 - } - ], - "selector": { - "e2etest": "216fc28e-622b-11e9-8c26-acde48001122" - }, - "sessionAffinity": "None", - "type": "ClusterIP" - }, - "status": { - "loadBalancer": {} - } - } - e2e.go:86: Waiting for e2e-eventtransformation-logger-pod to be deleted - e2e.go:86: Cleaning resource: "e2e-eventtransformation-logger-pod" - { - "apiVersion": "v1", - "kind": "Pod", - "metadata": { - "annotations": { - "sidecar.istio.io/inject": "true", - "sidecar.istio.io/status": "{\"version\":\"b159c540eed2383d13abb7a44e9519b1c51cf1b2fa469878e22a452dfa6b3ba9\",\"initContainers\":[\"istio-init\"],\"containers\":[\"istio-proxy\"],\"volumes\":[\"istio-envoy\",\"istio-certs\"],\"imagePullSecrets\":null}" - }, - "creationTimestamp": "2019-04-18T22:41:39Z", - "labels": { - "e2etest": "216fc28e-622b-11e9-8c26-acde48001122" - }, - "name": "e2e-eventtransformation-logger-pod", - "namespace": "testeventtransformation-in-memory-channel", - "resourceVersion": "2720757", - "selfLink": "/api/v1/namespaces/testeventtransformation-in-memory-channel/pods/e2e-eventtransformation-logger-pod", - "uid": "2174df7f-622b-11e9-92f2-42010a8a0052" - }, - "spec": { - "containers": [ - { - "image": "us.gcr.io/akashv-public/logevents:latest", - "imagePullPolicy": "Always", - "name": "logevents", - "resources": {}, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "volumeMounts": [ - { - "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", - "name": "default-token-72bts", - "readOnly": true - } - ] - }, - { - "args": [ - "proxy", - "sidecar", - "--domain", - "$(POD_NAMESPACE).svc.cluster.local", - "--configPath", - "/etc/istio/proxy", - "--binaryPath", - "/usr/local/bin/envoy", - "--serviceCluster", - "e2e-eventtransformation-logger-pod.testeventtransformation-in-memory-channel", - "--drainDuration", - "45s", - "--parentShutdownDuration", - "1m0s", - "--discoveryAddress", - "istio-pilot.istio-system:15010", - "--zipkinAddress", - "zipkin.istio-system:9411", - "--connectTimeout", - "10s", - "--proxyAdminPort", - "15000", - "--concurrency", - "2", - "--controlPlaneAuthPolicy", - "NONE", - "--statusPort", - "15020", - "--applicationPorts", - "" - ], - "env": [ - { - "name": "POD_NAME", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "metadata.name" - } - } - }, - { - "name": "POD_NAMESPACE", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "metadata.namespace" - } - } - }, - { - "name": "INSTANCE_IP", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "status.podIP" - } - } - }, - { - "name": "ISTIO_META_POD_NAME", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "metadata.name" - } - } - }, - { - "name": "ISTIO_META_CONFIG_NAMESPACE", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "metadata.namespace" - } - } - }, - { - "name": "ISTIO_META_INTERCEPTION_MODE", - "value": "REDIRECT" - }, - { - "name": "ISTIO_METAJSON_ANNOTATIONS", - "value": "{\"sidecar.istio.io/inject\":\"true\"}\n" - }, - { - "name": "ISTIO_METAJSON_LABELS", - "value": "{\"e2etest\":\"216fc28e-622b-11e9-8c26-acde48001122\"}\n" - } - ], - "image": "docker.io/istio/proxyv2:1.1.2", - "imagePullPolicy": "IfNotPresent", - "lifecycle": { - "preStop": { - "exec": { - "command": [ - "sleep", - "20" - ] - } - } - }, - "name": "istio-proxy", - "ports": [ - { - "containerPort": 15090, - "name": 
"http-envoy-prom", - "protocol": "TCP" - } - ], - "readinessProbe": { - "failureThreshold": 30, - "httpGet": { - "path": "/healthz/ready", - "port": 15020, - "scheme": "HTTP" - }, - "initialDelaySeconds": 1, - "periodSeconds": 2, - "successThreshold": 1, - "timeoutSeconds": 1 - }, - "resources": { - "limits": { - "cpu": "2", - "memory": "128Mi" - }, - "requests": { - "cpu": "100m", - "memory": "128Mi" - } - }, - "securityContext": { - "procMount": "Default", - "readOnlyRootFilesystem": true, - "runAsUser": 1337 - }, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "volumeMounts": [ - { - "mountPath": "/etc/istio/proxy", - "name": "istio-envoy" - }, - { - "mountPath": "/etc/certs/", - "name": "istio-certs", - "readOnly": true - }, - { - "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", - "name": "default-token-72bts", - "readOnly": true - } - ] - } - ], - "dnsPolicy": "ClusterFirst", - "initContainers": [ - { - "args": [ - "-p", - "15001", - "-u", - "1337", - "-m", - "REDIRECT", - "-i", - "*", - "-x", - "", - "-b", - "", - "-d", - "15020" - ], - "image": "docker.io/istio/proxy_init:1.1.2", - "imagePullPolicy": "IfNotPresent", - "name": "istio-init", - "resources": { - "limits": { - "cpu": "100m", - "memory": "50Mi" - }, - "requests": { - "cpu": "10m", - "memory": "10Mi" - } - }, - "securityContext": { - "capabilities": { - "add": [ - "NET_ADMIN" - ] - }, - "procMount": "Default" - }, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File" - } - ], - "nodeName": "gke-kn-dev-2-default-pool-85a58e81-k5z3", - "priority": 0, - "restartPolicy": "Always", - "schedulerName": "default-scheduler", - "securityContext": {}, - "serviceAccount": "default", - "serviceAccountName": "default", - "terminationGracePeriodSeconds": 30, - "tolerations": [ - { - "effect": "NoExecute", - "key": "node.kubernetes.io/not-ready", - "operator": "Exists", - "tolerationSeconds": 300 - }, - { - "effect": "NoExecute", - "key": "node.kubernetes.io/unreachable", - "operator": "Exists", - "tolerationSeconds": 300 - } - ], - "volumes": [ - { - "name": "default-token-72bts", - "secret": { - "defaultMode": 420, - "secretName": "default-token-72bts" - } - }, - { - "emptyDir": { - "medium": "Memory" - }, - "name": "istio-envoy" - }, - { - "name": "istio-certs", - "secret": { - "defaultMode": 420, - "optional": true, - "secretName": "istio.default" - } - } - ] - }, - "status": { - "conditions": [ - { - "lastProbeTime": null, - "lastTransitionTime": "2019-04-18T22:41:43Z", - "status": "True", - "type": "Initialized" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-04-18T22:41:46Z", - "status": "True", - "type": "Ready" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-04-18T22:41:46Z", - "status": "True", - "type": "ContainersReady" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-04-18T22:41:39Z", - "status": "True", - "type": "PodScheduled" - } - ], - "containerStatuses": [ - { - "containerID": "docker://8aafd2fda361cb1a7dbd92b487b8ce6da28c68a9a24d29497628e38e5794b378", - "image": "istio/proxyv2:1.1.2", - "imageID": "docker-pullable://istio/proxyv2@sha256:25ec45680ca9ef9bcba56d06d34c0a84ac0415966112e0a324924a61fb74a158", - "lastState": {}, - "name": "istio-proxy", - "ready": true, - "restartCount": 0, - "state": { - "running": { - "startedAt": "2019-04-18T22:41:44Z" - } - } - }, - { - "containerID": "docker://fced340bb00c0101848b6635dd35a73760382178e9dc0605b2552a6613622c4f", - "image": 
"us.gcr.io/akashv-public/logevents:latest", - "imageID": "docker-pullable://us.gcr.io/akashv-public/logevents@sha256:94e704c729126cbe49c56bc2ab53cc4d1c7b322a3c1a7a8e46191c9a29984733", - "lastState": {}, - "name": "logevents", - "ready": true, - "restartCount": 0, - "state": { - "running": { - "startedAt": "2019-04-18T22:41:44Z" - } - } - } - ], - "hostIP": "10.138.0.50", - "initContainerStatuses": [ - { - "containerID": "docker://a272958362cdcc7ac257c54560482a74639f44f3be3a460f1822042bd3b25124", - "image": "istio/proxy_init:1.1.2", - "imageID": "docker-pullable://istio/proxy_init@sha256:ae12e1cf004427d567ac3446f7a26c0e2b905ac1c7bf5a865b22e5a3749f81b5", - "lastState": {}, - "name": "istio-init", - "ready": true, - "restartCount": 0, - "state": { - "terminated": { - "containerID": "docker://a272958362cdcc7ac257c54560482a74639f44f3be3a460f1822042bd3b25124", - "exitCode": 0, - "finishedAt": "2019-04-18T22:41:42Z", - "reason": "Completed", - "startedAt": "2019-04-18T22:41:41Z" - } - } - } - ], - "phase": "Running", - "podIP": "10.16.0.52", - "qosClass": "Burstable", - "startTime": "2019-04-18T22:41:39Z" - } - } - e2e.go:86: Waiting for e2e-eventtransformation-logger-pod to be deleted - e2e.go:86: Cleaning resource: "e2e-eventtransformation-transformation-pod" - { - "apiVersion": "v1", - "kind": "Service", - "metadata": { - "creationTimestamp": "2019-04-18T22:41:39Z", - "name": "e2e-eventtransformation-transformation-pod", - "namespace": "testeventtransformation-in-memory-channel", - "resourceVersion": "2720618", - "selfLink": "/api/v1/namespaces/testeventtransformation-in-memory-channel/services/e2e-eventtransformation-transformation-pod", - "uid": "2166c47d-622b-11e9-92f2-42010a8a0052" - }, - "spec": { - "clusterIP": "10.19.250.6", - "ports": [ - { - "name": "http", - "port": 80, - "protocol": "TCP", - "targetPort": 8080 - } - ], - "selector": { - "e2etest": "1977be60-622b-11e9-8c26-acde48001122" - }, - "sessionAffinity": "None", - "type": "ClusterIP" - }, - "status": { - "loadBalancer": {} - } - } - e2e.go:86: Waiting for e2e-eventtransformation-transformation-pod to be deleted - e2e.go:86: Cleaning resource: "e2e-eventtransformation-transformation-pod" - { - "apiVersion": "v1", - "kind": "Pod", - "metadata": { - "annotations": { - "sidecar.istio.io/inject": "true", - "sidecar.istio.io/status": "{\"version\":\"b159c540eed2383d13abb7a44e9519b1c51cf1b2fa469878e22a452dfa6b3ba9\",\"initContainers\":[\"istio-init\"],\"containers\":[\"istio-proxy\"],\"volumes\":[\"istio-envoy\",\"istio-certs\"],\"imagePullSecrets\":null}" - }, - "creationTimestamp": "2019-04-18T22:41:26Z", - "labels": { - "e2etest": "1977be60-622b-11e9-8c26-acde48001122" - }, - "name": "e2e-eventtransformation-transformation-pod", - "namespace": "testeventtransformation-in-memory-channel", - "resourceVersion": "2720593", - "selfLink": "/api/v1/namespaces/testeventtransformation-in-memory-channel/pods/e2e-eventtransformation-transformation-pod", - "uid": "198323d1-622b-11e9-92f2-42010a8a0052" - }, - "spec": { - "containers": [ - { - "args": [ - "-msg-postfix", - "194c0568-622b-11e9-8c26-acde48001122" - ], - "image": "us.gcr.io/akashv-public/transformevents:latest", - "imagePullPolicy": "Always", - "name": "transformevents", - "resources": {}, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "volumeMounts": [ - { - "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", - "name": "default-token-72bts", - "readOnly": true - } - ] - }, - { - "args": [ - "proxy", - "sidecar", - 
"--domain", - "$(POD_NAMESPACE).svc.cluster.local", - "--configPath", - "/etc/istio/proxy", - "--binaryPath", - "/usr/local/bin/envoy", - "--serviceCluster", - "e2e-eventtransformation-transformation-pod.testeventtransformation-in-memory-channel", - "--drainDuration", - "45s", - "--parentShutdownDuration", - "1m0s", - "--discoveryAddress", - "istio-pilot.istio-system:15010", - "--zipkinAddress", - "zipkin.istio-system:9411", - "--connectTimeout", - "10s", - "--proxyAdminPort", - "15000", - "--concurrency", - "2", - "--controlPlaneAuthPolicy", - "NONE", - "--statusPort", - "15020", - "--applicationPorts", - "" - ], - "env": [ - { - "name": "POD_NAME", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "metadata.name" - } - } - }, - { - "name": "POD_NAMESPACE", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "metadata.namespace" - } - } - }, - { - "name": "INSTANCE_IP", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "status.podIP" - } - } - }, - { - "name": "ISTIO_META_POD_NAME", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "metadata.name" - } - } - }, - { - "name": "ISTIO_META_CONFIG_NAMESPACE", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "metadata.namespace" - } - } - }, - { - "name": "ISTIO_META_INTERCEPTION_MODE", - "value": "REDIRECT" - }, - { - "name": "ISTIO_METAJSON_ANNOTATIONS", - "value": "{\"sidecar.istio.io/inject\":\"true\"}\n" - }, - { - "name": "ISTIO_METAJSON_LABELS", - "value": "{\"e2etest\":\"1977be60-622b-11e9-8c26-acde48001122\"}\n" - } - ], - "image": "docker.io/istio/proxyv2:1.1.2", - "imagePullPolicy": "IfNotPresent", - "lifecycle": { - "preStop": { - "exec": { - "command": [ - "sleep", - "20" - ] - } - } - }, - "name": "istio-proxy", - "ports": [ - { - "containerPort": 15090, - "name": "http-envoy-prom", - "protocol": "TCP" - } - ], - "readinessProbe": { - "failureThreshold": 30, - "httpGet": { - "path": "/healthz/ready", - "port": 15020, - "scheme": "HTTP" - }, - "initialDelaySeconds": 1, - "periodSeconds": 2, - "successThreshold": 1, - "timeoutSeconds": 1 - }, - "resources": { - "limits": { - "cpu": "2", - "memory": "128Mi" - }, - "requests": { - "cpu": "100m", - "memory": "128Mi" - } - }, - "securityContext": { - "procMount": "Default", - "readOnlyRootFilesystem": true, - "runAsUser": 1337 - }, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "volumeMounts": [ - { - "mountPath": "/etc/istio/proxy", - "name": "istio-envoy" - }, - { - "mountPath": "/etc/certs/", - "name": "istio-certs", - "readOnly": true - }, - { - "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", - "name": "default-token-72bts", - "readOnly": true - } - ] - } - ], - "dnsPolicy": "ClusterFirst", - "initContainers": [ - { - "args": [ - "-p", - "15001", - "-u", - "1337", - "-m", - "REDIRECT", - "-i", - "*", - "-x", - "", - "-b", - "", - "-d", - "15020" - ], - "image": "docker.io/istio/proxy_init:1.1.2", - "imagePullPolicy": "IfNotPresent", - "name": "istio-init", - "resources": { - "limits": { - "cpu": "100m", - "memory": "50Mi" - }, - "requests": { - "cpu": "10m", - "memory": "10Mi" - } - }, - "securityContext": { - "capabilities": { - "add": [ - "NET_ADMIN" - ] - }, - "procMount": "Default" - }, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File" - } - ], - "nodeName": "gke-kn-dev-2-default-pool-85a58e81-k5z3", - "priority": 0, - "restartPolicy": "Always", - "schedulerName": "default-scheduler", 
- "securityContext": {}, - "serviceAccount": "default", - "serviceAccountName": "default", - "terminationGracePeriodSeconds": 30, - "tolerations": [ - { - "effect": "NoExecute", - "key": "node.kubernetes.io/not-ready", - "operator": "Exists", - "tolerationSeconds": 300 - }, - { - "effect": "NoExecute", - "key": "node.kubernetes.io/unreachable", - "operator": "Exists", - "tolerationSeconds": 300 - } - ], - "volumes": [ - { - "name": "default-token-72bts", - "secret": { - "defaultMode": 420, - "secretName": "default-token-72bts" - } - }, - { - "emptyDir": { - "medium": "Memory" - }, - "name": "istio-envoy" - }, - { - "name": "istio-certs", - "secret": { - "defaultMode": 420, - "optional": true, - "secretName": "istio.default" - } - } - ] - }, - "status": { - "conditions": [ - { - "lastProbeTime": null, - "lastTransitionTime": "2019-04-18T22:41:29Z", - "status": "True", - "type": "Initialized" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-04-18T22:41:31Z", - "status": "True", - "type": "Ready" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-04-18T22:41:31Z", - "status": "True", - "type": "ContainersReady" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-04-18T22:41:26Z", - "status": "True", - "type": "PodScheduled" - } - ], - "containerStatuses": [ - { - "containerID": "docker://f368504ff1c0b47ca0808e5a1f82c3573e9aaf97553ac175cc50b13850673f35", - "image": "istio/proxyv2:1.1.2", - "imageID": "docker-pullable://istio/proxyv2@sha256:25ec45680ca9ef9bcba56d06d34c0a84ac0415966112e0a324924a61fb74a158", - "lastState": {}, - "name": "istio-proxy", - "ready": true, - "restartCount": 0, - "state": { - "running": { - "startedAt": "2019-04-18T22:41:29Z" - } - } - }, - { - "containerID": "docker://f7d5cb2faf27a866d019e2e8550d9c540df814790713db54a0f1180a17f1081c", - "image": "us.gcr.io/akashv-public/transformevents:latest", - "imageID": "docker-pullable://us.gcr.io/akashv-public/transformevents@sha256:b3f8a1d064f42f01573626a676f683243e2ce58beea985b384277c6c61fb200a", - "lastState": {}, - "name": "transformevents", - "ready": true, - "restartCount": 0, - "state": { - "running": { - "startedAt": "2019-04-18T22:41:29Z" - } - } - } - ], - "hostIP": "10.138.0.50", - "initContainerStatuses": [ - { - "containerID": "docker://7539d3c00cb07653e2d7f29e8de3e33283a4add02dcf005c66b6658e476eb13b", - "image": "istio/proxy_init:1.1.2", - "imageID": "docker-pullable://istio/proxy_init@sha256:ae12e1cf004427d567ac3446f7a26c0e2b905ac1c7bf5a865b22e5a3749f81b5", - "lastState": {}, - "name": "istio-init", - "ready": true, - "restartCount": 0, - "state": { - "terminated": { - "containerID": "docker://7539d3c00cb07653e2d7f29e8de3e33283a4add02dcf005c66b6658e476eb13b", - "exitCode": 0, - "finishedAt": "2019-04-18T22:41:29Z", - "reason": "Completed", - "startedAt": "2019-04-18T22:41:28Z" - } - } - } - ], - "phase": "Running", - "podIP": "10.16.0.47", - "qosClass": "Burstable", - "startTime": "2019-04-18T22:41:26Z" - } - } - e2e.go:86: Waiting for e2e-eventtransformation-transformation-pod to be deleted - --- PASS: TestMain/TestDefaultBrokerWithManyTriggers (298.64s) - e2e.go:86: Creating Namespace: testdefaultbrokerwithmanytriggers-in-memory-channel - broker_trigger_test.go:62: Labeling namespace testdefaultbrokerwithmanytriggers-in-memory-channel - broker_trigger_test.go:68: Namespace testdefaultbrokerwithmanytriggers-in-memory-channel annotated - broker_trigger_test.go:71: Waiting for default broker to be ready - broker_trigger_test.go:79: Default broker ready: 
"http://default-broker.testdefaultbrokerwithmanytriggers-in-memory-channel.svc.cluster.local" - broker_trigger_test.go:90: Creating Subscriber pods - broker_trigger_test.go:103: Subscriber pods created - broker_trigger_test.go:105: Waiting for subscriber pods to become running - broker_trigger_test.go:112: Subscriber pods running - broker_trigger_test.go:114: Creating Subscriber services - broker_trigger_test.go:124: Subscriber services created - broker_trigger_test.go:126: Creating Triggers - broker_trigger_test.go:146: Triggers created - broker_trigger_test.go:148: Waiting for triggers to become ready - broker_trigger_test.go:155: Triggers ready - broker_trigger_test.go:168: Waiting for filter and ingress pods to become running - broker_trigger_test.go:171: Creating event sender pods - e2e.go:86: Event sources mismatch, receive source1, send source2 - e2e.go:86: Event sources mismatch, receive source1, send source2 - e2e.go:86: Event types mismatch, receive type1, send type2 - e2e.go:86: Event types mismatch, receive type1, send type2 - e2e.go:86: Event types mismatch, receive type1, send type2 - e2e.go:86: Event sources mismatch, receive source1, send source2 - e2e.go:86: Event types mismatch, receive type1, send type2 - broker_trigger_test.go:205: Event sender pods created. Waiting for them to be running - broker_trigger_test.go:212: Event sender pods running. Verifying events delivered to appropriate dumpers - broker_trigger_test.go:218: Dumper "dumper-testany-testany" expecting "Body-type1-source1,Body-type1-source2,Body-type2-source1,Body-type2-source2" - e2e.go:86: Found content "Body-type1-source1" for dumper-testany-testany/logevents in logs "2019/04/18 22:41:36 will listen on :8080\n2019/04/18 22:42:21 Validation: valid\nContext Attributes,\n specversion: 0.2\n type: type1\n source: source1\n id: f6d44cdc-3b90-4732-a5cd-d8994ba22da7\n time: 2019-04-18T22:42:20.94615988Z\n contenttype: application/json\nExtensions,\n knativehistory: default-broker-8gtbw-channel-56964.testdefaultbrokerwithmanytriggers-in-memory-channel.svc.cluster.local\nData,\n {\n \"msg\": \"Body-type1-source1\",\n \"sequence\": \"1\"\n }\n2019/04/18 22:42:21 Validation: valid\nContext Attributes,\n specversion: 0.2\n type: type2\n source: source1\n id: 65224332-1d66-4a39-8c00-a3b1659f480b\n time: 2019-04-18T22:42:21.163128588Z\n contenttype: application/json\nExtensions,\n knativehistory: default-broker-8gtbw-channel-56964.testdefaultbrokerwithmanytriggers-in-memory-channel.svc.cluster.local\nData,\n {\n \"msg\": \"Body-type2-source1\",\n \"sequence\": \"1\"\n }\n2019/04/18 22:42:21 Validation: valid\nContext Attributes,\n specversion: 0.2\n type: type1\n source: source2\n id: f8870e97-0654-489e-a436-3df45013ddcf\n time: 2019-04-18T22:42:21.432669802Z\n contenttype: application/json\nExtensions,\n knativehistory: default-broker-8gtbw-channel-56964.testdefaultbrokerwithmanytriggers-in-memory-channel.svc.cluster.local\nData,\n {\n \"msg\": \"Body-type1-source2\",\n \"sequence\": \"1\"\n }\n2019/04/18 22:42:22 Validation: valid\nContext Attributes,\n specversion: 0.2\n type: type2\n source: source2\n id: 3f925289-dd79-4515-9312-c345e567e605\n time: 2019-04-18T22:42:22.062918491Z\n contenttype: application/json\nExtensions,\n knativehistory: default-broker-8gtbw-channel-56964.testdefaultbrokerwithmanytriggers-in-memory-channel.svc.cluster.local\nData,\n {\n \"msg\": \"Body-type2-source2\",\n \"sequence\": \"1\"\n }\n" - e2e.go:86: Found content "Body-type1-source2" for dumper-testany-testany/logevents in logs 
"2019/04/18 22:41:36 will listen on :8080\n2019/04/18 22:42:21 Validation: valid\nContext Attributes,\n specversion: 0.2\n type: type1\n source: source1\n id: f6d44cdc-3b90-4732-a5cd-d8994ba22da7\n time: 2019-04-18T22:42:20.94615988Z\n contenttype: application/json\nExtensions,\n knativehistory: default-broker-8gtbw-channel-56964.testdefaultbrokerwithmanytriggers-in-memory-channel.svc.cluster.local\nData,\n {\n \"msg\": \"Body-type1-source1\",\n \"sequence\": \"1\"\n }\n2019/04/18 22:42:21 Validation: valid\nContext Attributes,\n specversion: 0.2\n type: type2\n source: source1\n id: 65224332-1d66-4a39-8c00-a3b1659f480b\n time: 2019-04-18T22:42:21.163128588Z\n contenttype: application/json\nExtensions,\n knativehistory: default-broker-8gtbw-channel-56964.testdefaultbrokerwithmanytriggers-in-memory-channel.svc.cluster.local\nData,\n {\n \"msg\": \"Body-type2-source1\",\n \"sequence\": \"1\"\n }\n2019/04/18 22:42:21 Validation: valid\nContext Attributes,\n specversion: 0.2\n type: type1\n source: source2\n id: f8870e97-0654-489e-a436-3df45013ddcf\n time: 2019-04-18T22:42:21.432669802Z\n contenttype: application/json\nExtensions,\n knativehistory: default-broker-8gtbw-channel-56964.testdefaultbrokerwithmanytriggers-in-memory-channel.svc.cluster.local\nData,\n {\n \"msg\": \"Body-type1-source2\",\n \"sequence\": \"1\"\n }\n2019/04/18 22:42:22 Validation: valid\nContext Attributes,\n specversion: 0.2\n type: type2\n source: source2\n id: 3f925289-dd79-4515-9312-c345e567e605\n time: 2019-04-18T22:42:22.062918491Z\n contenttype: application/json\nExtensions,\n knativehistory: default-broker-8gtbw-channel-56964.testdefaultbrokerwithmanytriggers-in-memory-channel.svc.cluster.local\nData,\n {\n \"msg\": \"Body-type2-source2\",\n \"sequence\": \"1\"\n }\n" - e2e.go:86: Found content "Body-type2-source1" for dumper-testany-testany/logevents in logs "2019/04/18 22:41:36 will listen on :8080\n2019/04/18 22:42:21 Validation: valid\nContext Attributes,\n specversion: 0.2\n type: type1\n source: source1\n id: f6d44cdc-3b90-4732-a5cd-d8994ba22da7\n time: 2019-04-18T22:42:20.94615988Z\n contenttype: application/json\nExtensions,\n knativehistory: default-broker-8gtbw-channel-56964.testdefaultbrokerwithmanytriggers-in-memory-channel.svc.cluster.local\nData,\n {\n \"msg\": \"Body-type1-source1\",\n \"sequence\": \"1\"\n }\n2019/04/18 22:42:21 Validation: valid\nContext Attributes,\n specversion: 0.2\n type: type2\n source: source1\n id: 65224332-1d66-4a39-8c00-a3b1659f480b\n time: 2019-04-18T22:42:21.163128588Z\n contenttype: application/json\nExtensions,\n knativehistory: default-broker-8gtbw-channel-56964.testdefaultbrokerwithmanytriggers-in-memory-channel.svc.cluster.local\nData,\n {\n \"msg\": \"Body-type2-source1\",\n \"sequence\": \"1\"\n }\n2019/04/18 22:42:21 Validation: valid\nContext Attributes,\n specversion: 0.2\n type: type1\n source: source2\n id: f8870e97-0654-489e-a436-3df45013ddcf\n time: 2019-04-18T22:42:21.432669802Z\n contenttype: application/json\nExtensions,\n knativehistory: default-broker-8gtbw-channel-56964.testdefaultbrokerwithmanytriggers-in-memory-channel.svc.cluster.local\nData,\n {\n \"msg\": \"Body-type1-source2\",\n \"sequence\": \"1\"\n }\n2019/04/18 22:42:22 Validation: valid\nContext Attributes,\n specversion: 0.2\n type: type2\n source: source2\n id: 3f925289-dd79-4515-9312-c345e567e605\n time: 2019-04-18T22:42:22.062918491Z\n contenttype: application/json\nExtensions,\n knativehistory: 
default-broker-8gtbw-channel-56964.testdefaultbrokerwithmanytriggers-in-memory-channel.svc.cluster.local\nData,\n {\n \"msg\": \"Body-type2-source2\",\n \"sequence\": \"1\"\n }\n" - e2e.go:86: Found content "Body-type2-source2" for dumper-testany-testany/logevents in logs "2019/04/18 22:41:36 will listen on :8080\n2019/04/18 22:42:21 Validation: valid\nContext Attributes,\n specversion: 0.2\n type: type1\n source: source1\n id: f6d44cdc-3b90-4732-a5cd-d8994ba22da7\n time: 2019-04-18T22:42:20.94615988Z\n contenttype: application/json\nExtensions,\n knativehistory: default-broker-8gtbw-channel-56964.testdefaultbrokerwithmanytriggers-in-memory-channel.svc.cluster.local\nData,\n {\n \"msg\": \"Body-type1-source1\",\n \"sequence\": \"1\"\n }\n2019/04/18 22:42:21 Validation: valid\nContext Attributes,\n specversion: 0.2\n type: type2\n source: source1\n id: 65224332-1d66-4a39-8c00-a3b1659f480b\n time: 2019-04-18T22:42:21.163128588Z\n contenttype: application/json\nExtensions,\n knativehistory: default-broker-8gtbw-channel-56964.testdefaultbrokerwithmanytriggers-in-memory-channel.svc.cluster.local\nData,\n {\n \"msg\": \"Body-type2-source1\",\n \"sequence\": \"1\"\n }\n2019/04/18 22:42:21 Validation: valid\nContext Attributes,\n specversion: 0.2\n type: type1\n source: source2\n id: f8870e97-0654-489e-a436-3df45013ddcf\n time: 2019-04-18T22:42:21.432669802Z\n contenttype: application/json\nExtensions,\n knativehistory: default-broker-8gtbw-channel-56964.testdefaultbrokerwithmanytriggers-in-memory-channel.svc.cluster.local\nData,\n {\n \"msg\": \"Body-type1-source2\",\n \"sequence\": \"1\"\n }\n2019/04/18 22:42:22 Validation: valid\nContext Attributes,\n specversion: 0.2\n type: type2\n source: source2\n id: 3f925289-dd79-4515-9312-c345e567e605\n time: 2019-04-18T22:42:22.062918491Z\n contenttype: application/json\nExtensions,\n knativehistory: default-broker-8gtbw-channel-56964.testdefaultbrokerwithmanytriggers-in-memory-channel.svc.cluster.local\nData,\n {\n \"msg\": \"Body-type2-source2\",\n \"sequence\": \"1\"\n }\n" - broker_trigger_test.go:218: Dumper "dumper-type1-testany" expecting "Body-type1-source1,Body-type1-source2" - e2e.go:86: Found content "Body-type1-source1" for dumper-type1-testany/logevents in logs "2019/04/18 22:41:34 will listen on :8080\n2019/04/18 22:42:21 Validation: valid\nContext Attributes,\n specversion: 0.2\n type: type1\n source: source1\n id: f6d44cdc-3b90-4732-a5cd-d8994ba22da7\n time: 2019-04-18T22:42:20.94615988Z\n contenttype: application/json\nExtensions,\n knativehistory: default-broker-8gtbw-channel-56964.testdefaultbrokerwithmanytriggers-in-memory-channel.svc.cluster.local\nData,\n {\n \"msg\": \"Body-type1-source1\",\n \"sequence\": \"1\"\n }\n2019/04/18 22:42:21 Validation: valid\nContext Attributes,\n specversion: 0.2\n type: type1\n source: source2\n id: f8870e97-0654-489e-a436-3df45013ddcf\n time: 2019-04-18T22:42:21.432669802Z\n contenttype: application/json\nExtensions,\n knativehistory: default-broker-8gtbw-channel-56964.testdefaultbrokerwithmanytriggers-in-memory-channel.svc.cluster.local\nData,\n {\n \"msg\": \"Body-type1-source2\",\n \"sequence\": \"1\"\n }\n" - e2e.go:86: Found content "Body-type1-source2" for dumper-type1-testany/logevents in logs "2019/04/18 22:41:34 will listen on :8080\n2019/04/18 22:42:21 Validation: valid\nContext Attributes,\n specversion: 0.2\n type: type1\n source: source1\n id: f6d44cdc-3b90-4732-a5cd-d8994ba22da7\n time: 2019-04-18T22:42:20.94615988Z\n contenttype: application/json\nExtensions,\n knativehistory: 
default-broker-8gtbw-channel-56964.testdefaultbrokerwithmanytriggers-in-memory-channel.svc.cluster.local\nData,\n {\n \"msg\": \"Body-type1-source1\",\n \"sequence\": \"1\"\n }\n2019/04/18 22:42:21 Validation: valid\nContext Attributes,\n specversion: 0.2\n type: type1\n source: source2\n id: f8870e97-0654-489e-a436-3df45013ddcf\n time: 2019-04-18T22:42:21.432669802Z\n contenttype: application/json\nExtensions,\n knativehistory: default-broker-8gtbw-channel-56964.testdefaultbrokerwithmanytriggers-in-memory-channel.svc.cluster.local\nData,\n {\n \"msg\": \"Body-type1-source2\",\n \"sequence\": \"1\"\n }\n" - broker_trigger_test.go:218: Dumper "dumper-testany-source1" expecting "Body-type1-source1,Body-type2-source1" - e2e.go:86: Found content "Body-type1-source1" for dumper-testany-source1/logevents in logs "2019/04/18 22:41:34 will listen on :8080\n2019/04/18 22:42:21 Validation: valid\nContext Attributes,\n specversion: 0.2\n type: type1\n source: source1\n id: f6d44cdc-3b90-4732-a5cd-d8994ba22da7\n time: 2019-04-18T22:42:20.94615988Z\n contenttype: application/json\nExtensions,\n knativehistory: default-broker-8gtbw-channel-56964.testdefaultbrokerwithmanytriggers-in-memory-channel.svc.cluster.local\nData,\n {\n \"msg\": \"Body-type1-source1\",\n \"sequence\": \"1\"\n }\n2019/04/18 22:42:21 Validation: valid\nContext Attributes,\n specversion: 0.2\n type: type2\n source: source1\n id: 65224332-1d66-4a39-8c00-a3b1659f480b\n time: 2019-04-18T22:42:21.163128588Z\n contenttype: application/json\nExtensions,\n knativehistory: default-broker-8gtbw-channel-56964.testdefaultbrokerwithmanytriggers-in-memory-channel.svc.cluster.local\nData,\n {\n \"msg\": \"Body-type2-source1\",\n \"sequence\": \"1\"\n }\n" - e2e.go:86: Found content "Body-type2-source1" for dumper-testany-source1/logevents in logs "2019/04/18 22:41:34 will listen on :8080\n2019/04/18 22:42:21 Validation: valid\nContext Attributes,\n specversion: 0.2\n type: type1\n source: source1\n id: f6d44cdc-3b90-4732-a5cd-d8994ba22da7\n time: 2019-04-18T22:42:20.94615988Z\n contenttype: application/json\nExtensions,\n knativehistory: default-broker-8gtbw-channel-56964.testdefaultbrokerwithmanytriggers-in-memory-channel.svc.cluster.local\nData,\n {\n \"msg\": \"Body-type1-source1\",\n \"sequence\": \"1\"\n }\n2019/04/18 22:42:21 Validation: valid\nContext Attributes,\n specversion: 0.2\n type: type2\n source: source1\n id: 65224332-1d66-4a39-8c00-a3b1659f480b\n time: 2019-04-18T22:42:21.163128588Z\n contenttype: application/json\nExtensions,\n knativehistory: default-broker-8gtbw-channel-56964.testdefaultbrokerwithmanytriggers-in-memory-channel.svc.cluster.local\nData,\n {\n \"msg\": \"Body-type2-source1\",\n \"sequence\": \"1\"\n }\n" - broker_trigger_test.go:218: Dumper "dumper-type1-source1" expecting "Body-type1-source1" - e2e.go:86: Found content "Body-type1-source1" for dumper-type1-source1/logevents in logs "2019/04/18 22:41:34 will listen on :8080\n2019/04/18 22:42:21 Validation: valid\nContext Attributes,\n specversion: 0.2\n type: type1\n source: source1\n id: f6d44cdc-3b90-4732-a5cd-d8994ba22da7\n time: 2019-04-18T22:42:20.94615988Z\n contenttype: application/json\nExtensions,\n knativehistory: default-broker-8gtbw-channel-56964.testdefaultbrokerwithmanytriggers-in-memory-channel.svc.cluster.local\nData,\n {\n \"msg\": \"Body-type1-source1\",\n \"sequence\": \"1\"\n }\n" - e2e.go:86: Cleaning resource: "sender-type2-source2" - { - "apiVersion": "v1", - "kind": "Pod", - "metadata": { - "annotations": { - "sidecar.istio.io/inject": 
"true", - "sidecar.istio.io/status": "{\"version\":\"b159c540eed2383d13abb7a44e9519b1c51cf1b2fa469878e22a452dfa6b3ba9\",\"initContainers\":[\"istio-init\"],\"containers\":[\"istio-proxy\"],\"volumes\":[\"istio-envoy\",\"istio-certs\"],\"imagePullSecrets\":null}" - }, - "creationTimestamp": "2019-04-18T22:42:12Z", - "name": "sender-type2-source2", - "namespace": "testdefaultbrokerwithmanytriggers-in-memory-channel", - "resourceVersion": "2721010", - "selfLink": "/api/v1/namespaces/testdefaultbrokerwithmanytriggers-in-memory-channel/pods/sender-type2-source2", - "uid": "3534f7e9-622b-11e9-92f2-42010a8a0052" - }, - "spec": { - "containers": [ - { - "args": [ - "-event-id", - "", - "-event-type", - "type2", - "-source", - "source2", - "-data", - "{\"msg\":\"Body-type2-source2\"}", - "-encoding", - "binary", - "-sink", - "http://default-broker.testdefaultbrokerwithmanytriggers-in-memory-channel.svc.cluster.local" - ], - "image": "us.gcr.io/akashv-public/sendevent:latest", - "imagePullPolicy": "Always", - "name": "sendevent", - "resources": {}, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "volumeMounts": [ - { - "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", - "name": "default-token-ch586", - "readOnly": true - } - ] - }, - { - "args": [ - "proxy", - "sidecar", - "--domain", - "$(POD_NAMESPACE).svc.cluster.local", - "--configPath", - "/etc/istio/proxy", - "--binaryPath", - "/usr/local/bin/envoy", - "--serviceCluster", - "sender-type2-source2.testdefaultbrokerwithmanytriggers-in-memory-channel", - "--drainDuration", - "45s", - "--parentShutdownDuration", - "1m0s", - "--discoveryAddress", - "istio-pilot.istio-system:15010", - "--zipkinAddress", - "zipkin.istio-system:9411", - "--connectTimeout", - "10s", - "--proxyAdminPort", - "15000", - "--concurrency", - "2", - "--controlPlaneAuthPolicy", - "NONE", - "--statusPort", - "15020", - "--applicationPorts", - "" - ], - "env": [ - { - "name": "POD_NAME", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "metadata.name" - } - } - }, - { - "name": "POD_NAMESPACE", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "metadata.namespace" - } - } - }, - { - "name": "INSTANCE_IP", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "status.podIP" - } - } - }, - { - "name": "ISTIO_META_POD_NAME", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "metadata.name" - } - } - }, - { - "name": "ISTIO_META_CONFIG_NAMESPACE", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "metadata.namespace" - } - } - }, - { - "name": "ISTIO_META_INTERCEPTION_MODE", - "value": "REDIRECT" - }, - { - "name": "ISTIO_METAJSON_ANNOTATIONS", - "value": "{\"sidecar.istio.io/inject\":\"true\"}\n" - } - ], - "image": "docker.io/istio/proxyv2:1.1.2", - "imagePullPolicy": "IfNotPresent", - "lifecycle": { - "preStop": { - "exec": { - "command": [ - "sleep", - "20" - ] - } - } - }, - "name": "istio-proxy", - "ports": [ - { - "containerPort": 15090, - "name": "http-envoy-prom", - "protocol": "TCP" - } - ], - "readinessProbe": { - "failureThreshold": 30, - "httpGet": { - "path": "/healthz/ready", - "port": 15020, - "scheme": "HTTP" - }, - "initialDelaySeconds": 1, - "periodSeconds": 2, - "successThreshold": 1, - "timeoutSeconds": 1 - }, - "resources": { - "limits": { - "cpu": "2", - "memory": "128Mi" - }, - "requests": { - "cpu": "100m", - "memory": "128Mi" - } - }, - "securityContext": { - "procMount": "Default", - 
"readOnlyRootFilesystem": true, - "runAsUser": 1337 - }, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "volumeMounts": [ - { - "mountPath": "/etc/istio/proxy", - "name": "istio-envoy" - }, - { - "mountPath": "/etc/certs/", - "name": "istio-certs", - "readOnly": true - }, - { - "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", - "name": "default-token-ch586", - "readOnly": true - } - ] - } - ], - "dnsPolicy": "ClusterFirst", - "initContainers": [ - { - "args": [ - "-p", - "15001", - "-u", - "1337", - "-m", - "REDIRECT", - "-i", - "*", - "-x", - "", - "-b", - "", - "-d", - "15020" - ], - "image": "docker.io/istio/proxy_init:1.1.2", - "imagePullPolicy": "IfNotPresent", - "name": "istio-init", - "resources": { - "limits": { - "cpu": "100m", - "memory": "50Mi" - }, - "requests": { - "cpu": "10m", - "memory": "10Mi" - } - }, - "securityContext": { - "capabilities": { - "add": [ - "NET_ADMIN" - ] - }, - "procMount": "Default" - }, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File" - } - ], - "nodeName": "gke-kn-dev-2-default-pool-85a58e81-vx0d", - "priority": 0, - "restartPolicy": "Never", - "schedulerName": "default-scheduler", - "securityContext": {}, - "serviceAccount": "default", - "serviceAccountName": "default", - "terminationGracePeriodSeconds": 30, - "tolerations": [ - { - "effect": "NoExecute", - "key": "node.kubernetes.io/not-ready", - "operator": "Exists", - "tolerationSeconds": 300 - }, - { - "effect": "NoExecute", - "key": "node.kubernetes.io/unreachable", - "operator": "Exists", - "tolerationSeconds": 300 - } - ], - "volumes": [ - { - "name": "default-token-ch586", - "secret": { - "defaultMode": 420, - "secretName": "default-token-ch586" - } - }, - { - "emptyDir": { - "medium": "Memory" - }, - "name": "istio-envoy" - }, - { - "name": "istio-certs", - "secret": { - "defaultMode": 420, - "optional": true, - "secretName": "istio.default" - } - } - ] - }, - "status": { - "conditions": [ - { - "lastProbeTime": null, - "lastTransitionTime": "2019-04-18T22:42:16Z", - "status": "True", - "type": "Initialized" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-04-18T22:42:20Z", - "status": "True", - "type": "Ready" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-04-18T22:42:20Z", - "status": "True", - "type": "ContainersReady" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-04-18T22:42:12Z", - "status": "True", - "type": "PodScheduled" - } - ], - "containerStatuses": [ - { - "containerID": "docker://723709b9b3360c5f71d95166bb085c43c2754c59739301345d9c5d7f2bfaf50e", - "image": "istio/proxyv2:1.1.2", - "imageID": "docker-pullable://istio/proxyv2@sha256:25ec45680ca9ef9bcba56d06d34c0a84ac0415966112e0a324924a61fb74a158", - "lastState": {}, - "name": "istio-proxy", - "ready": true, - "restartCount": 0, - "state": { - "running": { - "startedAt": "2019-04-18T22:42:17Z" - } - } - }, - { - "containerID": "docker://8488fe6b94b76a09d077495da604a22462eb4a9d530460020f598609f9d09410", - "image": "us.gcr.io/akashv-public/sendevent:latest", - "imageID": "docker-pullable://us.gcr.io/akashv-public/sendevent@sha256:3626b282b2ced720497bcb6ff057d2db08d024fdd2eeb9bf89f7ef2b3f3750bb", - "lastState": {}, - "name": "sendevent", - "ready": true, - "restartCount": 0, - "state": { - "running": { - "startedAt": "2019-04-18T22:42:17Z" - } - } - } - ], - "hostIP": "10.138.0.48", - "initContainerStatuses": [ - { - "containerID": 
"docker://c0aebee5eeec44071bba105e3406f354afe880ab3346170e731596ae07223638", - "image": "istio/proxy_init:1.1.2", - "imageID": "docker-pullable://istio/proxy_init@sha256:ae12e1cf004427d567ac3446f7a26c0e2b905ac1c7bf5a865b22e5a3749f81b5", - "lastState": {}, - "name": "istio-init", - "ready": true, - "restartCount": 0, - "state": { - "terminated": { - "containerID": "docker://c0aebee5eeec44071bba105e3406f354afe880ab3346170e731596ae07223638", - "exitCode": 0, - "finishedAt": "2019-04-18T22:42:16Z", - "reason": "Completed", - "startedAt": "2019-04-18T22:42:15Z" - } - } - } - ], - "phase": "Running", - "podIP": "10.16.2.232", - "qosClass": "Burstable", - "startTime": "2019-04-18T22:42:12Z" - } - } - e2e.go:86: Waiting for sender-type2-source2 to be deleted - e2e.go:86: Cleaning resource: "sender-type2-source1" - { - "apiVersion": "v1", - "kind": "Pod", - "metadata": { - "annotations": { - "sidecar.istio.io/inject": "true", - "sidecar.istio.io/status": "{\"version\":\"b159c540eed2383d13abb7a44e9519b1c51cf1b2fa469878e22a452dfa6b3ba9\",\"initContainers\":[\"istio-init\"],\"containers\":[\"istio-proxy\"],\"volumes\":[\"istio-envoy\",\"istio-certs\"],\"imagePullSecrets\":null}" - }, - "creationTimestamp": "2019-04-18T22:42:12Z", - "name": "sender-type2-source1", - "namespace": "testdefaultbrokerwithmanytriggers-in-memory-channel", - "resourceVersion": "2721037", - "selfLink": "/api/v1/namespaces/testdefaultbrokerwithmanytriggers-in-memory-channel/pods/sender-type2-source1", - "uid": "352b23ff-622b-11e9-92f2-42010a8a0052" - }, - "spec": { - "containers": [ - { - "args": [ - "-event-id", - "", - "-event-type", - "type2", - "-source", - "source1", - "-data", - "{\"msg\":\"Body-type2-source1\"}", - "-encoding", - "binary", - "-sink", - "http://default-broker.testdefaultbrokerwithmanytriggers-in-memory-channel.svc.cluster.local" - ], - "image": "us.gcr.io/akashv-public/sendevent:latest", - "imagePullPolicy": "Always", - "name": "sendevent", - "resources": {}, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "volumeMounts": [ - { - "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", - "name": "default-token-ch586", - "readOnly": true - } - ] - }, - { - "args": [ - "proxy", - "sidecar", - "--domain", - "$(POD_NAMESPACE).svc.cluster.local", - "--configPath", - "/etc/istio/proxy", - "--binaryPath", - "/usr/local/bin/envoy", - "--serviceCluster", - "sender-type2-source1.testdefaultbrokerwithmanytriggers-in-memory-channel", - "--drainDuration", - "45s", - "--parentShutdownDuration", - "1m0s", - "--discoveryAddress", - "istio-pilot.istio-system:15010", - "--zipkinAddress", - "zipkin.istio-system:9411", - "--connectTimeout", - "10s", - "--proxyAdminPort", - "15000", - "--concurrency", - "2", - "--controlPlaneAuthPolicy", - "NONE", - "--statusPort", - "15020", - "--applicationPorts", - "" - ], - "env": [ - { - "name": "POD_NAME", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "metadata.name" - } - } - }, - { - "name": "POD_NAMESPACE", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "metadata.namespace" - } - } - }, - { - "name": "INSTANCE_IP", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "status.podIP" - } - } - }, - { - "name": "ISTIO_META_POD_NAME", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "metadata.name" - } - } - }, - { - "name": "ISTIO_META_CONFIG_NAMESPACE", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "metadata.namespace" - } - } - 
}, - { - "name": "ISTIO_META_INTERCEPTION_MODE", - "value": "REDIRECT" - }, - { - "name": "ISTIO_METAJSON_ANNOTATIONS", - "value": "{\"sidecar.istio.io/inject\":\"true\"}\n" - } - ], - "image": "docker.io/istio/proxyv2:1.1.2", - "imagePullPolicy": "IfNotPresent", - "lifecycle": { - "preStop": { - "exec": { - "command": [ - "sleep", - "20" - ] - } - } - }, - "name": "istio-proxy", - "ports": [ - { - "containerPort": 15090, - "name": "http-envoy-prom", - "protocol": "TCP" - } - ], - "readinessProbe": { - "failureThreshold": 30, - "httpGet": { - "path": "/healthz/ready", - "port": 15020, - "scheme": "HTTP" - }, - "initialDelaySeconds": 1, - "periodSeconds": 2, - "successThreshold": 1, - "timeoutSeconds": 1 - }, - "resources": { - "limits": { - "cpu": "2", - "memory": "128Mi" - }, - "requests": { - "cpu": "100m", - "memory": "128Mi" - } - }, - "securityContext": { - "procMount": "Default", - "readOnlyRootFilesystem": true, - "runAsUser": 1337 - }, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "volumeMounts": [ - { - "mountPath": "/etc/istio/proxy", - "name": "istio-envoy" - }, - { - "mountPath": "/etc/certs/", - "name": "istio-certs", - "readOnly": true - }, - { - "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", - "name": "default-token-ch586", - "readOnly": true - } - ] - } - ], - "dnsPolicy": "ClusterFirst", - "initContainers": [ - { - "args": [ - "-p", - "15001", - "-u", - "1337", - "-m", - "REDIRECT", - "-i", - "*", - "-x", - "", - "-b", - "", - "-d", - "15020" - ], - "image": "docker.io/istio/proxy_init:1.1.2", - "imagePullPolicy": "IfNotPresent", - "name": "istio-init", - "resources": { - "limits": { - "cpu": "100m", - "memory": "50Mi" - }, - "requests": { - "cpu": "10m", - "memory": "10Mi" - } - }, - "securityContext": { - "capabilities": { - "add": [ - "NET_ADMIN" - ] - }, - "procMount": "Default" - }, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File" - } - ], - "nodeName": "gke-kn-dev-2-default-pool-85a58e81-vx0d", - "priority": 0, - "restartPolicy": "Never", - "schedulerName": "default-scheduler", - "securityContext": {}, - "serviceAccount": "default", - "serviceAccountName": "default", - "terminationGracePeriodSeconds": 30, - "tolerations": [ - { - "effect": "NoExecute", - "key": "node.kubernetes.io/not-ready", - "operator": "Exists", - "tolerationSeconds": 300 - }, - { - "effect": "NoExecute", - "key": "node.kubernetes.io/unreachable", - "operator": "Exists", - "tolerationSeconds": 300 - } - ], - "volumes": [ - { - "name": "default-token-ch586", - "secret": { - "defaultMode": 420, - "secretName": "default-token-ch586" - } - }, - { - "emptyDir": { - "medium": "Memory" - }, - "name": "istio-envoy" - }, - { - "name": "istio-certs", - "secret": { - "defaultMode": 420, - "optional": true, - "secretName": "istio.default" - } - } - ] - }, - "status": { - "conditions": [ - { - "lastProbeTime": null, - "lastTransitionTime": "2019-04-18T22:42:15Z", - "status": "True", - "type": "Initialized" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-04-18T22:42:27Z", - "message": "containers with unready status: [sendevent]", - "reason": "ContainersNotReady", - "status": "False", - "type": "Ready" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-04-18T22:42:27Z", - "message": "containers with unready status: [sendevent]", - "reason": "ContainersNotReady", - "status": "False", - "type": "ContainersReady" - }, - { - "lastProbeTime": null, - "lastTransitionTime": 
"2019-04-18T22:42:12Z", - "status": "True", - "type": "PodScheduled" - } - ], - "containerStatuses": [ - { - "containerID": "docker://7a82c415cdd5dd2506990a7c9f5b482a404555d195e2df765f5f62db3eaa9755", - "image": "istio/proxyv2:1.1.2", - "imageID": "docker-pullable://istio/proxyv2@sha256:25ec45680ca9ef9bcba56d06d34c0a84ac0415966112e0a324924a61fb74a158", - "lastState": {}, - "name": "istio-proxy", - "ready": true, - "restartCount": 0, - "state": { - "running": { - "startedAt": "2019-04-18T22:42:16Z" - } - } - }, - { - "containerID": "docker://d1ff82d12882cd5a078ba801dee8195305e8375cb6c2fd63344a91141358375e", - "image": "us.gcr.io/akashv-public/sendevent:latest", - "imageID": "docker-pullable://us.gcr.io/akashv-public/sendevent@sha256:3626b282b2ced720497bcb6ff057d2db08d024fdd2eeb9bf89f7ef2b3f3750bb", - "lastState": {}, - "name": "sendevent", - "ready": false, - "restartCount": 0, - "state": { - "terminated": { - "containerID": "docker://d1ff82d12882cd5a078ba801dee8195305e8375cb6c2fd63344a91141358375e", - "exitCode": 0, - "finishedAt": "2019-04-18T22:42:26Z", - "reason": "Completed", - "startedAt": "2019-04-18T22:42:16Z" - } - } - } - ], - "hostIP": "10.138.0.48", - "initContainerStatuses": [ - { - "containerID": "docker://14fb34e00b55264974ee62d87ea99e7915c057dd4ee9547602d9c48e1094ad0c", - "image": "istio/proxy_init:1.1.2", - "imageID": "docker-pullable://istio/proxy_init@sha256:ae12e1cf004427d567ac3446f7a26c0e2b905ac1c7bf5a865b22e5a3749f81b5", - "lastState": {}, - "name": "istio-init", - "ready": true, - "restartCount": 0, - "state": { - "terminated": { - "containerID": "docker://14fb34e00b55264974ee62d87ea99e7915c057dd4ee9547602d9c48e1094ad0c", - "exitCode": 0, - "finishedAt": "2019-04-18T22:42:15Z", - "reason": "Completed", - "startedAt": "2019-04-18T22:42:14Z" - } - } - } - ], - "phase": "Running", - "podIP": "10.16.2.231", - "qosClass": "Burstable", - "startTime": "2019-04-18T22:42:12Z" - } - } - e2e.go:86: Waiting for sender-type2-source1 to be deleted - e2e.go:86: Cleaning resource: "sender-type1-source2" - { - "apiVersion": "v1", - "kind": "Pod", - "metadata": { - "annotations": { - "sidecar.istio.io/inject": "true", - "sidecar.istio.io/status": "{\"version\":\"b159c540eed2383d13abb7a44e9519b1c51cf1b2fa469878e22a452dfa6b3ba9\",\"initContainers\":[\"istio-init\"],\"containers\":[\"istio-proxy\"],\"volumes\":[\"istio-envoy\",\"istio-certs\"],\"imagePullSecrets\":null}" - }, - "creationTimestamp": "2019-04-18T22:42:12Z", - "name": "sender-type1-source2", - "namespace": "testdefaultbrokerwithmanytriggers-in-memory-channel", - "resourceVersion": "2721036", - "selfLink": "/api/v1/namespaces/testdefaultbrokerwithmanytriggers-in-memory-channel/pods/sender-type1-source2", - "uid": "3521b35f-622b-11e9-92f2-42010a8a0052" - }, - "spec": { - "containers": [ - { - "args": [ - "-event-id", - "", - "-event-type", - "type1", - "-source", - "source2", - "-data", - "{\"msg\":\"Body-type1-source2\"}", - "-encoding", - "binary", - "-sink", - "http://default-broker.testdefaultbrokerwithmanytriggers-in-memory-channel.svc.cluster.local" - ], - "image": "us.gcr.io/akashv-public/sendevent:latest", - "imagePullPolicy": "Always", - "name": "sendevent", - "resources": {}, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "volumeMounts": [ - { - "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", - "name": "default-token-ch586", - "readOnly": true - } - ] - }, - { - "args": [ - "proxy", - "sidecar", - "--domain", - "$(POD_NAMESPACE).svc.cluster.local", - 
"--configPath", - "/etc/istio/proxy", - "--binaryPath", - "/usr/local/bin/envoy", - "--serviceCluster", - "sender-type1-source2.testdefaultbrokerwithmanytriggers-in-memory-channel", - "--drainDuration", - "45s", - "--parentShutdownDuration", - "1m0s", - "--discoveryAddress", - "istio-pilot.istio-system:15010", - "--zipkinAddress", - "zipkin.istio-system:9411", - "--connectTimeout", - "10s", - "--proxyAdminPort", - "15000", - "--concurrency", - "2", - "--controlPlaneAuthPolicy", - "NONE", - "--statusPort", - "15020", - "--applicationPorts", - "" - ], - "env": [ - { - "name": "POD_NAME", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "metadata.name" - } - } - }, - { - "name": "POD_NAMESPACE", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "metadata.namespace" - } - } - }, - { - "name": "INSTANCE_IP", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "status.podIP" - } - } - }, - { - "name": "ISTIO_META_POD_NAME", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "metadata.name" - } - } - }, - { - "name": "ISTIO_META_CONFIG_NAMESPACE", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "metadata.namespace" - } - } - }, - { - "name": "ISTIO_META_INTERCEPTION_MODE", - "value": "REDIRECT" - }, - { - "name": "ISTIO_METAJSON_ANNOTATIONS", - "value": "{\"sidecar.istio.io/inject\":\"true\"}\n" - } - ], - "image": "docker.io/istio/proxyv2:1.1.2", - "imagePullPolicy": "IfNotPresent", - "lifecycle": { - "preStop": { - "exec": { - "command": [ - "sleep", - "20" - ] - } - } - }, - "name": "istio-proxy", - "ports": [ - { - "containerPort": 15090, - "name": "http-envoy-prom", - "protocol": "TCP" - } - ], - "readinessProbe": { - "failureThreshold": 30, - "httpGet": { - "path": "/healthz/ready", - "port": 15020, - "scheme": "HTTP" - }, - "initialDelaySeconds": 1, - "periodSeconds": 2, - "successThreshold": 1, - "timeoutSeconds": 1 - }, - "resources": { - "limits": { - "cpu": "2", - "memory": "128Mi" - }, - "requests": { - "cpu": "100m", - "memory": "128Mi" - } - }, - "securityContext": { - "procMount": "Default", - "readOnlyRootFilesystem": true, - "runAsUser": 1337 - }, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "volumeMounts": [ - { - "mountPath": "/etc/istio/proxy", - "name": "istio-envoy" - }, - { - "mountPath": "/etc/certs/", - "name": "istio-certs", - "readOnly": true - }, - { - "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", - "name": "default-token-ch586", - "readOnly": true - } - ] - } - ], - "dnsPolicy": "ClusterFirst", - "initContainers": [ - { - "args": [ - "-p", - "15001", - "-u", - "1337", - "-m", - "REDIRECT", - "-i", - "*", - "-x", - "", - "-b", - "", - "-d", - "15020" - ], - "image": "docker.io/istio/proxy_init:1.1.2", - "imagePullPolicy": "IfNotPresent", - "name": "istio-init", - "resources": { - "limits": { - "cpu": "100m", - "memory": "50Mi" - }, - "requests": { - "cpu": "10m", - "memory": "10Mi" - } - }, - "securityContext": { - "capabilities": { - "add": [ - "NET_ADMIN" - ] - }, - "procMount": "Default" - }, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File" - } - ], - "nodeName": "gke-kn-dev-2-default-pool-85a58e81-vx0d", - "priority": 0, - "restartPolicy": "Never", - "schedulerName": "default-scheduler", - "securityContext": {}, - "serviceAccount": "default", - "serviceAccountName": "default", - "terminationGracePeriodSeconds": 30, - "tolerations": [ - { - "effect": "NoExecute", - 
"key": "node.kubernetes.io/not-ready", - "operator": "Exists", - "tolerationSeconds": 300 - }, - { - "effect": "NoExecute", - "key": "node.kubernetes.io/unreachable", - "operator": "Exists", - "tolerationSeconds": 300 - } - ], - "volumes": [ - { - "name": "default-token-ch586", - "secret": { - "defaultMode": 420, - "secretName": "default-token-ch586" - } - }, - { - "emptyDir": { - "medium": "Memory" - }, - "name": "istio-envoy" - }, - { - "name": "istio-certs", - "secret": { - "defaultMode": 420, - "optional": true, - "secretName": "istio.default" - } - } - ] - }, - "status": { - "conditions": [ - { - "lastProbeTime": null, - "lastTransitionTime": "2019-04-18T22:42:15Z", - "status": "True", - "type": "Initialized" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-04-18T22:42:27Z", - "message": "containers with unready status: [sendevent]", - "reason": "ContainersNotReady", - "status": "False", - "type": "Ready" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-04-18T22:42:27Z", - "message": "containers with unready status: [sendevent]", - "reason": "ContainersNotReady", - "status": "False", - "type": "ContainersReady" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-04-18T22:42:12Z", - "status": "True", - "type": "PodScheduled" - } - ], - "containerStatuses": [ - { - "containerID": "docker://fc731d3560a68110ad1a42ded0b1787bc0cbed454a93995d2a6112c22c80434d", - "image": "istio/proxyv2:1.1.2", - "imageID": "docker-pullable://istio/proxyv2@sha256:25ec45680ca9ef9bcba56d06d34c0a84ac0415966112e0a324924a61fb74a158", - "lastState": {}, - "name": "istio-proxy", - "ready": true, - "restartCount": 0, - "state": { - "running": { - "startedAt": "2019-04-18T22:42:16Z" - } - } - }, - { - "containerID": "docker://e01a5b3e647dd411f4bd2f05849e56602178107eb730172beed4d3f98bd19a63", - "image": "us.gcr.io/akashv-public/sendevent:latest", - "imageID": "docker-pullable://us.gcr.io/akashv-public/sendevent@sha256:3626b282b2ced720497bcb6ff057d2db08d024fdd2eeb9bf89f7ef2b3f3750bb", - "lastState": {}, - "name": "sendevent", - "ready": false, - "restartCount": 0, - "state": { - "terminated": { - "containerID": "docker://e01a5b3e647dd411f4bd2f05849e56602178107eb730172beed4d3f98bd19a63", - "exitCode": 0, - "finishedAt": "2019-04-18T22:42:26Z", - "reason": "Completed", - "startedAt": "2019-04-18T22:42:16Z" - } - } - } - ], - "hostIP": "10.138.0.48", - "initContainerStatuses": [ - { - "containerID": "docker://4152a6ca43a7ce03593e7fad020a834b77289cd887a55744077e321fed486630", - "image": "istio/proxy_init:1.1.2", - "imageID": "docker-pullable://istio/proxy_init@sha256:ae12e1cf004427d567ac3446f7a26c0e2b905ac1c7bf5a865b22e5a3749f81b5", - "lastState": {}, - "name": "istio-init", - "ready": true, - "restartCount": 0, - "state": { - "terminated": { - "containerID": "docker://4152a6ca43a7ce03593e7fad020a834b77289cd887a55744077e321fed486630", - "exitCode": 0, - "finishedAt": "2019-04-18T22:42:15Z", - "reason": "Completed", - "startedAt": "2019-04-18T22:42:14Z" - } - } - } - ], - "phase": "Running", - "podIP": "10.16.2.230", - "qosClass": "Burstable", - "startTime": "2019-04-18T22:42:12Z" - } - } - e2e.go:86: Waiting for sender-type1-source2 to be deleted - e2e.go:86: Cleaning resource: "sender-type1-source1" - { - "apiVersion": "v1", - "kind": "Pod", - "metadata": { - "annotations": { - "sidecar.istio.io/inject": "true", - "sidecar.istio.io/status": 
"{\"version\":\"b159c540eed2383d13abb7a44e9519b1c51cf1b2fa469878e22a452dfa6b3ba9\",\"initContainers\":[\"istio-init\"],\"containers\":[\"istio-proxy\"],\"volumes\":[\"istio-envoy\",\"istio-certs\"],\"imagePullSecrets\":null}" - }, - "creationTimestamp": "2019-04-18T22:42:12Z", - "name": "sender-type1-source1", - "namespace": "testdefaultbrokerwithmanytriggers-in-memory-channel", - "resourceVersion": "2721021", - "selfLink": "/api/v1/namespaces/testdefaultbrokerwithmanytriggers-in-memory-channel/pods/sender-type1-source1", - "uid": "35181da8-622b-11e9-92f2-42010a8a0052" - }, - "spec": { - "containers": [ - { - "args": [ - "-event-id", - "", - "-event-type", - "type1", - "-source", - "source1", - "-data", - "{\"msg\":\"Body-type1-source1\"}", - "-encoding", - "binary", - "-sink", - "http://default-broker.testdefaultbrokerwithmanytriggers-in-memory-channel.svc.cluster.local" - ], - "image": "us.gcr.io/akashv-public/sendevent:latest", - "imagePullPolicy": "Always", - "name": "sendevent", - "resources": {}, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "volumeMounts": [ - { - "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", - "name": "default-token-ch586", - "readOnly": true - } - ] - }, - { - "args": [ - "proxy", - "sidecar", - "--domain", - "$(POD_NAMESPACE).svc.cluster.local", - "--configPath", - "/etc/istio/proxy", - "--binaryPath", - "/usr/local/bin/envoy", - "--serviceCluster", - "sender-type1-source1.testdefaultbrokerwithmanytriggers-in-memory-channel", - "--drainDuration", - "45s", - "--parentShutdownDuration", - "1m0s", - "--discoveryAddress", - "istio-pilot.istio-system:15010", - "--zipkinAddress", - "zipkin.istio-system:9411", - "--connectTimeout", - "10s", - "--proxyAdminPort", - "15000", - "--concurrency", - "2", - "--controlPlaneAuthPolicy", - "NONE", - "--statusPort", - "15020", - "--applicationPorts", - "" - ], - "env": [ - { - "name": "POD_NAME", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "metadata.name" - } - } - }, - { - "name": "POD_NAMESPACE", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "metadata.namespace" - } - } - }, - { - "name": "INSTANCE_IP", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "status.podIP" - } - } - }, - { - "name": "ISTIO_META_POD_NAME", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "metadata.name" - } - } - }, - { - "name": "ISTIO_META_CONFIG_NAMESPACE", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "metadata.namespace" - } - } - }, - { - "name": "ISTIO_META_INTERCEPTION_MODE", - "value": "REDIRECT" - }, - { - "name": "ISTIO_METAJSON_ANNOTATIONS", - "value": "{\"sidecar.istio.io/inject\":\"true\"}\n" - } - ], - "image": "docker.io/istio/proxyv2:1.1.2", - "imagePullPolicy": "IfNotPresent", - "lifecycle": { - "preStop": { - "exec": { - "command": [ - "sleep", - "20" - ] - } - } - }, - "name": "istio-proxy", - "ports": [ - { - "containerPort": 15090, - "name": "http-envoy-prom", - "protocol": "TCP" - } - ], - "readinessProbe": { - "failureThreshold": 30, - "httpGet": { - "path": "/healthz/ready", - "port": 15020, - "scheme": "HTTP" - }, - "initialDelaySeconds": 1, - "periodSeconds": 2, - "successThreshold": 1, - "timeoutSeconds": 1 - }, - "resources": { - "limits": { - "cpu": "2", - "memory": "128Mi" - }, - "requests": { - "cpu": "100m", - "memory": "128Mi" - } - }, - "securityContext": { - "procMount": "Default", - "readOnlyRootFilesystem": true, - "runAsUser": 1337 - }, - 
"terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "volumeMounts": [ - { - "mountPath": "/etc/istio/proxy", - "name": "istio-envoy" - }, - { - "mountPath": "/etc/certs/", - "name": "istio-certs", - "readOnly": true - }, - { - "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", - "name": "default-token-ch586", - "readOnly": true - } - ] - } - ], - "dnsPolicy": "ClusterFirst", - "initContainers": [ - { - "args": [ - "-p", - "15001", - "-u", - "1337", - "-m", - "REDIRECT", - "-i", - "*", - "-x", - "", - "-b", - "", - "-d", - "15020" - ], - "image": "docker.io/istio/proxy_init:1.1.2", - "imagePullPolicy": "IfNotPresent", - "name": "istio-init", - "resources": { - "limits": { - "cpu": "100m", - "memory": "50Mi" - }, - "requests": { - "cpu": "10m", - "memory": "10Mi" - } - }, - "securityContext": { - "capabilities": { - "add": [ - "NET_ADMIN" - ] - }, - "procMount": "Default" - }, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File" - } - ], - "nodeName": "gke-kn-dev-2-default-pool-85a58e81-vx0d", - "priority": 0, - "restartPolicy": "Never", - "schedulerName": "default-scheduler", - "securityContext": {}, - "serviceAccount": "default", - "serviceAccountName": "default", - "terminationGracePeriodSeconds": 30, - "tolerations": [ - { - "effect": "NoExecute", - "key": "node.kubernetes.io/not-ready", - "operator": "Exists", - "tolerationSeconds": 300 - }, - { - "effect": "NoExecute", - "key": "node.kubernetes.io/unreachable", - "operator": "Exists", - "tolerationSeconds": 300 - } - ], - "volumes": [ - { - "name": "default-token-ch586", - "secret": { - "defaultMode": 420, - "secretName": "default-token-ch586" - } - }, - { - "emptyDir": { - "medium": "Memory" - }, - "name": "istio-envoy" - }, - { - "name": "istio-certs", - "secret": { - "defaultMode": 420, - "optional": true, - "secretName": "istio.default" - } - } - ] - }, - "status": { - "conditions": [ - { - "lastProbeTime": null, - "lastTransitionTime": "2019-04-18T22:42:15Z", - "status": "True", - "type": "Initialized" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-04-18T22:42:26Z", - "message": "containers with unready status: [sendevent]", - "reason": "ContainersNotReady", - "status": "False", - "type": "Ready" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-04-18T22:42:26Z", - "message": "containers with unready status: [sendevent]", - "reason": "ContainersNotReady", - "status": "False", - "type": "ContainersReady" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-04-18T22:42:12Z", - "status": "True", - "type": "PodScheduled" - } - ], - "containerStatuses": [ - { - "containerID": "docker://c2cfd704dc463b3a160e0a26a371ee70619ef18805ee61211276c4a782f7cf56", - "image": "istio/proxyv2:1.1.2", - "imageID": "docker-pullable://istio/proxyv2@sha256:25ec45680ca9ef9bcba56d06d34c0a84ac0415966112e0a324924a61fb74a158", - "lastState": {}, - "name": "istio-proxy", - "ready": true, - "restartCount": 0, - "state": { - "running": { - "startedAt": "2019-04-18T22:42:16Z" - } - } - }, - { - "containerID": "docker://e609452b93a0112cd33d13776878b00caa4dcdbeef30a07fb8537aae3835d306", - "image": "us.gcr.io/akashv-public/sendevent:latest", - "imageID": "docker-pullable://us.gcr.io/akashv-public/sendevent@sha256:3626b282b2ced720497bcb6ff057d2db08d024fdd2eeb9bf89f7ef2b3f3750bb", - "lastState": {}, - "name": "sendevent", - "ready": false, - "restartCount": 0, - "state": { - "terminated": { - "containerID": 
"docker://e609452b93a0112cd33d13776878b00caa4dcdbeef30a07fb8537aae3835d306", - "exitCode": 0, - "finishedAt": "2019-04-18T22:42:25Z", - "reason": "Completed", - "startedAt": "2019-04-18T22:42:15Z" - } - } - } - ], - "hostIP": "10.138.0.48", - "initContainerStatuses": [ - { - "containerID": "docker://165a377b39d0e55918ac8b4335d67e37407b83ba545b1d0a00ab2bca15d13c2b", - "image": "istio/proxy_init:1.1.2", - "imageID": "docker-pullable://istio/proxy_init@sha256:ae12e1cf004427d567ac3446f7a26c0e2b905ac1c7bf5a865b22e5a3749f81b5", - "lastState": {}, - "name": "istio-init", - "ready": true, - "restartCount": 0, - "state": { - "terminated": { - "containerID": "docker://165a377b39d0e55918ac8b4335d67e37407b83ba545b1d0a00ab2bca15d13c2b", - "exitCode": 0, - "finishedAt": "2019-04-18T22:42:14Z", - "reason": "Completed", - "startedAt": "2019-04-18T22:42:13Z" - } - } - } - ], - "phase": "Running", - "podIP": "10.16.2.229", - "qosClass": "Burstable", - "startTime": "2019-04-18T22:42:12Z" - } - } - e2e.go:86: Waiting for sender-type1-source1 to be deleted - e2e.go:86: Cleaning resource: "trigger-type1-source1" - { - "apiVersion": "eventing.knative.dev/v1alpha1", - "kind": "Trigger", - "metadata": { - "creationTimestamp": "2019-04-18T22:41:42Z", - "generation": 1, - "name": "trigger-type1-source1", - "namespace": "testdefaultbrokerwithmanytriggers-in-memory-channel", - "resourceVersion": "2720684", - "selfLink": "/apis/eventing.knative.dev/v1alpha1/namespaces/testdefaultbrokerwithmanytriggers-in-memory-channel/triggers/trigger-type1-source1", - "uid": "23254a2d-622b-11e9-92f2-42010a8a0052" - }, - "spec": { - "broker": "default", - "filter": { - "sourceAndType": { - "source": "source1", - "type": "type1" - } - }, - "subscriber": { - "ref": { - "apiVersion": "v1", - "kind": "Service", - "name": "svc-type1-source1" - } - } - }, - "status": { - "conditions": [ - { - "lastTransitionTime": "2019-04-18T22:41:42Z", - "status": "True", - "type": "BrokerExists" - }, - { - "lastTransitionTime": "2019-04-18T22:41:42Z", - "status": "True", - "type": "Ready" - }, - { - "lastTransitionTime": "2019-04-18T22:41:42Z", - "status": "True", - "type": "Subscribed" - } - ], - "subscriberURI": "http://svc-type1-source1.testdefaultbrokerwithmanytriggers-in-memory-channel.svc.cluster.local/" - } - } - e2e.go:86: Waiting for trigger-type1-source1 to be deleted - e2e.go:86: Cleaning resource: "trigger-testany-source1" - { - "apiVersion": "eventing.knative.dev/v1alpha1", - "kind": "Trigger", - "metadata": { - "creationTimestamp": "2019-04-18T22:41:42Z", - "generation": 1, - "name": "trigger-testany-source1", - "namespace": "testdefaultbrokerwithmanytriggers-in-memory-channel", - "resourceVersion": "2720678", - "selfLink": "/apis/eventing.knative.dev/v1alpha1/namespaces/testdefaultbrokerwithmanytriggers-in-memory-channel/triggers/trigger-testany-source1", - "uid": "231bed21-622b-11e9-92f2-42010a8a0052" - }, - "spec": { - "broker": "default", - "filter": { - "sourceAndType": { - "source": "source1" - } - }, - "subscriber": { - "ref": { - "apiVersion": "v1", - "kind": "Service", - "name": "svc-testany-source1" - } - } - }, - "status": { - "conditions": [ - { - "lastTransitionTime": "2019-04-18T22:41:42Z", - "status": "True", - "type": "BrokerExists" - }, - { - "lastTransitionTime": "2019-04-18T22:41:42Z", - "status": "True", - "type": "Ready" - }, - { - "lastTransitionTime": "2019-04-18T22:41:42Z", - "status": "True", - "type": "Subscribed" - } - ], - "subscriberURI": 
"http://svc-testany-source1.testdefaultbrokerwithmanytriggers-in-memory-channel.svc.cluster.local/" - } - } - e2e.go:86: Waiting for trigger-testany-source1 to be deleted - e2e.go:86: Cleaning resource: "trigger-type1-testany" - { - "apiVersion": "eventing.knative.dev/v1alpha1", - "kind": "Trigger", - "metadata": { - "creationTimestamp": "2019-04-18T22:41:42Z", - "generation": 1, - "name": "trigger-type1-testany", - "namespace": "testdefaultbrokerwithmanytriggers-in-memory-channel", - "resourceVersion": "2720674", - "selfLink": "/apis/eventing.knative.dev/v1alpha1/namespaces/testdefaultbrokerwithmanytriggers-in-memory-channel/triggers/trigger-type1-testany", - "uid": "23161978-622b-11e9-92f2-42010a8a0052" - }, - "spec": { - "broker": "default", - "filter": { - "sourceAndType": { - "type": "type1" - } - }, - "subscriber": { - "ref": { - "apiVersion": "v1", - "kind": "Service", - "name": "svc-type1-testany" - } - } - }, - "status": { - "conditions": [ - { - "lastTransitionTime": "2019-04-18T22:41:42Z", - "status": "True", - "type": "BrokerExists" - }, - { - "lastTransitionTime": "2019-04-18T22:41:42Z", - "status": "True", - "type": "Ready" - }, - { - "lastTransitionTime": "2019-04-18T22:41:42Z", - "status": "True", - "type": "Subscribed" - } - ], - "subscriberURI": "http://svc-type1-testany.testdefaultbrokerwithmanytriggers-in-memory-channel.svc.cluster.local/" - } - } - e2e.go:86: Waiting for trigger-type1-testany to be deleted - e2e.go:86: Cleaning resource: "trigger-testany-testany" - { - "apiVersion": "eventing.knative.dev/v1alpha1", - "kind": "Trigger", - "metadata": { - "creationTimestamp": "2019-04-18T22:41:42Z", - "generation": 1, - "name": "trigger-testany-testany", - "namespace": "testdefaultbrokerwithmanytriggers-in-memory-channel", - "resourceVersion": "2720671", - "selfLink": "/apis/eventing.knative.dev/v1alpha1/namespaces/testdefaultbrokerwithmanytriggers-in-memory-channel/triggers/trigger-testany-testany", - "uid": "23092625-622b-11e9-92f2-42010a8a0052" - }, - "spec": { - "broker": "default", - "filter": { - "sourceAndType": {} - }, - "subscriber": { - "ref": { - "apiVersion": "v1", - "kind": "Service", - "name": "svc-testany-testany" - } - } - }, - "status": { - "conditions": [ - { - "lastTransitionTime": "2019-04-18T22:41:42Z", - "status": "True", - "type": "BrokerExists" - }, - { - "lastTransitionTime": "2019-04-18T22:41:42Z", - "status": "True", - "type": "Ready" - }, - { - "lastTransitionTime": "2019-04-18T22:41:42Z", - "status": "True", - "type": "Subscribed" - } - ], - "subscriberURI": "http://svc-testany-testany.testdefaultbrokerwithmanytriggers-in-memory-channel.svc.cluster.local/" - } - } - e2e.go:86: Waiting for trigger-testany-testany to be deleted - e2e.go:86: Cleaning resource: "svc-type1-source1" - { - "apiVersion": "v1", - "kind": "Service", - "metadata": { - "creationTimestamp": "2019-04-18T22:41:42Z", - "name": "svc-type1-source1", - "namespace": "testdefaultbrokerwithmanytriggers-in-memory-channel", - "resourceVersion": "2720667", - "selfLink": "/api/v1/namespaces/testdefaultbrokerwithmanytriggers-in-memory-channel/services/svc-type1-source1", - "uid": "2301cddc-622b-11e9-92f2-42010a8a0052" - }, - "spec": { - "clusterIP": "10.19.253.248", - "ports": [ - { - "name": "http", - "port": 80, - "protocol": "TCP", - "targetPort": 8080 - } - ], - "selector": { - "end2end-test-broker-trigger": "1bf8fe9c-622b-11e9-8c26-acde48001122" - }, - "sessionAffinity": "None", - "type": "ClusterIP" - }, - "status": { - "loadBalancer": {} - } - } - e2e.go:86: Waiting for 
svc-type1-source1 to be deleted - e2e.go:86: Cleaning resource: "svc-testany-source1" - { - "apiVersion": "v1", - "kind": "Service", - "metadata": { - "creationTimestamp": "2019-04-18T22:41:42Z", - "name": "svc-testany-source1", - "namespace": "testdefaultbrokerwithmanytriggers-in-memory-channel", - "resourceVersion": "2720663", - "selfLink": "/api/v1/namespaces/testdefaultbrokerwithmanytriggers-in-memory-channel/services/svc-testany-source1", - "uid": "22fa5554-622b-11e9-92f2-42010a8a0052" - }, - "spec": { - "clusterIP": "10.19.246.172", - "ports": [ - { - "name": "http", - "port": 80, - "protocol": "TCP", - "targetPort": 8080 - } - ], - "selector": { - "end2end-test-broker-trigger": "1bf8fe42-622b-11e9-8c26-acde48001122" - }, - "sessionAffinity": "None", - "type": "ClusterIP" - }, - "status": { - "loadBalancer": {} - } - } - e2e.go:86: Waiting for svc-testany-source1 to be deleted - e2e.go:86: Cleaning resource: "svc-type1-testany" - { - "apiVersion": "v1", - "kind": "Service", - "metadata": { - "creationTimestamp": "2019-04-18T22:41:42Z", - "name": "svc-type1-testany", - "namespace": "testdefaultbrokerwithmanytriggers-in-memory-channel", - "resourceVersion": "2720657", - "selfLink": "/api/v1/namespaces/testdefaultbrokerwithmanytriggers-in-memory-channel/services/svc-type1-testany", - "uid": "22f242c4-622b-11e9-92f2-42010a8a0052" - }, - "spec": { - "clusterIP": "10.19.251.249", - "ports": [ - { - "name": "http", - "port": 80, - "protocol": "TCP", - "targetPort": 8080 - } - ], - "selector": { - "end2end-test-broker-trigger": "1bf8fda2-622b-11e9-8c26-acde48001122" - }, - "sessionAffinity": "None", - "type": "ClusterIP" - }, - "status": { - "loadBalancer": {} - } - } - e2e.go:86: Waiting for svc-type1-testany to be deleted - e2e.go:86: Cleaning resource: "svc-testany-testany" - { - "apiVersion": "v1", - "kind": "Service", - "metadata": { - "creationTimestamp": "2019-04-18T22:41:42Z", - "name": "svc-testany-testany", - "namespace": "testdefaultbrokerwithmanytriggers-in-memory-channel", - "resourceVersion": "2720653", - "selfLink": "/api/v1/namespaces/testdefaultbrokerwithmanytriggers-in-memory-channel/services/svc-testany-testany", - "uid": "22ebb9d3-622b-11e9-92f2-42010a8a0052" - }, - "spec": { - "clusterIP": "10.19.247.200", - "ports": [ - { - "name": "http", - "port": 80, - "protocol": "TCP", - "targetPort": 8080 - } - ], - "selector": { - "end2end-test-broker-trigger": "1bf8fd16-622b-11e9-8c26-acde48001122" - }, - "sessionAffinity": "None", - "type": "ClusterIP" - }, - "status": { - "loadBalancer": {} - } - } - e2e.go:86: Waiting for svc-testany-testany to be deleted - e2e.go:86: Cleaning resource: "dumper-type1-source1" - { - "apiVersion": "v1", - "kind": "Pod", - "metadata": { - "annotations": { - "sidecar.istio.io/inject": "true", - "sidecar.istio.io/status": "{\"version\":\"b159c540eed2383d13abb7a44e9519b1c51cf1b2fa469878e22a452dfa6b3ba9\",\"initContainers\":[\"istio-init\"],\"containers\":[\"istio-proxy\"],\"volumes\":[\"istio-envoy\",\"istio-certs\"],\"imagePullSecrets\":null}" - }, - "creationTimestamp": "2019-04-18T22:41:30Z", - "labels": { - "end2end-test-broker-trigger": "1bf8fe9c-622b-11e9-8c26-acde48001122" - }, - "name": "dumper-type1-source1", - "namespace": "testdefaultbrokerwithmanytriggers-in-memory-channel", - "resourceVersion": "2720645", - "selfLink": "/api/v1/namespaces/testdefaultbrokerwithmanytriggers-in-memory-channel/pods/dumper-type1-source1", - "uid": "1c28657d-622b-11e9-92f2-42010a8a0052" - }, - "spec": { - "containers": [ - { - "image": 
"us.gcr.io/akashv-public/logevents:latest", - "imagePullPolicy": "Always", - "name": "logevents", - "resources": {}, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "volumeMounts": [ - { - "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", - "name": "default-token-ch586", - "readOnly": true - } - ] - }, - { - "args": [ - "proxy", - "sidecar", - "--domain", - "$(POD_NAMESPACE).svc.cluster.local", - "--configPath", - "/etc/istio/proxy", - "--binaryPath", - "/usr/local/bin/envoy", - "--serviceCluster", - "dumper-type1-source1.testdefaultbrokerwithmanytriggers-in-memory-channel", - "--drainDuration", - "45s", - "--parentShutdownDuration", - "1m0s", - "--discoveryAddress", - "istio-pilot.istio-system:15010", - "--zipkinAddress", - "zipkin.istio-system:9411", - "--connectTimeout", - "10s", - "--proxyAdminPort", - "15000", - "--concurrency", - "2", - "--controlPlaneAuthPolicy", - "NONE", - "--statusPort", - "15020", - "--applicationPorts", - "" - ], - "env": [ - { - "name": "POD_NAME", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "metadata.name" - } - } - }, - { - "name": "POD_NAMESPACE", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "metadata.namespace" - } - } - }, - { - "name": "INSTANCE_IP", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "status.podIP" - } - } - }, - { - "name": "ISTIO_META_POD_NAME", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "metadata.name" - } - } - }, - { - "name": "ISTIO_META_CONFIG_NAMESPACE", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "metadata.namespace" - } - } - }, - { - "name": "ISTIO_META_INTERCEPTION_MODE", - "value": "REDIRECT" - }, - { - "name": "ISTIO_METAJSON_ANNOTATIONS", - "value": "{\"sidecar.istio.io/inject\":\"true\"}\n" - }, - { - "name": "ISTIO_METAJSON_LABELS", - "value": "{\"end2end-test-broker-trigger\":\"1bf8fe9c-622b-11e9-8c26-acde48001122\"}\n" - } - ], - "image": "docker.io/istio/proxyv2:1.1.2", - "imagePullPolicy": "IfNotPresent", - "lifecycle": { - "preStop": { - "exec": { - "command": [ - "sleep", - "20" - ] - } - } - }, - "name": "istio-proxy", - "ports": [ - { - "containerPort": 15090, - "name": "http-envoy-prom", - "protocol": "TCP" - } - ], - "readinessProbe": { - "failureThreshold": 30, - "httpGet": { - "path": "/healthz/ready", - "port": 15020, - "scheme": "HTTP" - }, - "initialDelaySeconds": 1, - "periodSeconds": 2, - "successThreshold": 1, - "timeoutSeconds": 1 - }, - "resources": { - "limits": { - "cpu": "2", - "memory": "128Mi" - }, - "requests": { - "cpu": "100m", - "memory": "128Mi" - } - }, - "securityContext": { - "procMount": "Default", - "readOnlyRootFilesystem": true, - "runAsUser": 1337 - }, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "volumeMounts": [ - { - "mountPath": "/etc/istio/proxy", - "name": "istio-envoy" - }, - { - "mountPath": "/etc/certs/", - "name": "istio-certs", - "readOnly": true - }, - { - "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", - "name": "default-token-ch586", - "readOnly": true - } - ] - } - ], - "dnsPolicy": "ClusterFirst", - "initContainers": [ - { - "args": [ - "-p", - "15001", - "-u", - "1337", - "-m", - "REDIRECT", - "-i", - "*", - "-x", - "", - "-b", - "", - "-d", - "15020" - ], - "image": "docker.io/istio/proxy_init:1.1.2", - "imagePullPolicy": "IfNotPresent", - "name": "istio-init", - "resources": { - "limits": { - "cpu": "100m", - "memory": "50Mi" - 
}, - "requests": { - "cpu": "10m", - "memory": "10Mi" - } - }, - "securityContext": { - "capabilities": { - "add": [ - "NET_ADMIN" - ] - }, - "procMount": "Default" - }, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File" - } - ], - "nodeName": "gke-kn-dev-2-default-pool-85a58e81-vx0d", - "priority": 0, - "restartPolicy": "Always", - "schedulerName": "default-scheduler", - "securityContext": {}, - "serviceAccount": "default", - "serviceAccountName": "default", - "terminationGracePeriodSeconds": 30, - "tolerations": [ - { - "effect": "NoExecute", - "key": "node.kubernetes.io/not-ready", - "operator": "Exists", - "tolerationSeconds": 300 - }, - { - "effect": "NoExecute", - "key": "node.kubernetes.io/unreachable", - "operator": "Exists", - "tolerationSeconds": 300 - } - ], - "volumes": [ - { - "name": "default-token-ch586", - "secret": { - "defaultMode": 420, - "secretName": "default-token-ch586" - } - }, - { - "emptyDir": { - "medium": "Memory" - }, - "name": "istio-envoy" - }, - { - "name": "istio-certs", - "secret": { - "defaultMode": 420, - "optional": true, - "secretName": "istio.default" - } - } - ] - }, - "status": { - "conditions": [ - { - "lastProbeTime": null, - "lastTransitionTime": "2019-04-18T22:41:33Z", - "status": "True", - "type": "Initialized" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-04-18T22:41:36Z", - "status": "True", - "type": "Ready" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-04-18T22:41:36Z", - "status": "True", - "type": "ContainersReady" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-04-18T22:41:30Z", - "status": "True", - "type": "PodScheduled" - } - ], - "containerStatuses": [ - { - "containerID": "docker://0168637bf8ea6a0e6e5b84da05e0eb625df4cd78507f41671191d3b6b58eb550", - "image": "istio/proxyv2:1.1.2", - "imageID": "docker-pullable://istio/proxyv2@sha256:25ec45680ca9ef9bcba56d06d34c0a84ac0415966112e0a324924a61fb74a158", - "lastState": {}, - "name": "istio-proxy", - "ready": true, - "restartCount": 0, - "state": { - "running": { - "startedAt": "2019-04-18T22:41:34Z" - } - } - }, - { - "containerID": "docker://9ba1f2b48c74fe3ee9e71089cdd0ec17c5ea82915a78900c450568e18bd6450c", - "image": "us.gcr.io/akashv-public/logevents:latest", - "imageID": "docker-pullable://us.gcr.io/akashv-public/logevents@sha256:94e704c729126cbe49c56bc2ab53cc4d1c7b322a3c1a7a8e46191c9a29984733", - "lastState": {}, - "name": "logevents", - "ready": true, - "restartCount": 0, - "state": { - "running": { - "startedAt": "2019-04-18T22:41:34Z" - } - } - } - ], - "hostIP": "10.138.0.48", - "initContainerStatuses": [ - { - "containerID": "docker://a31a7ce17d99ea39129733ed60bfba241f17e76a73a1763850a3b472752c6d54", - "image": "istio/proxy_init:1.1.2", - "imageID": "docker-pullable://istio/proxy_init@sha256:ae12e1cf004427d567ac3446f7a26c0e2b905ac1c7bf5a865b22e5a3749f81b5", - "lastState": {}, - "name": "istio-init", - "ready": true, - "restartCount": 0, - "state": { - "terminated": { - "containerID": "docker://a31a7ce17d99ea39129733ed60bfba241f17e76a73a1763850a3b472752c6d54", - "exitCode": 0, - "finishedAt": "2019-04-18T22:41:33Z", - "reason": "Completed", - "startedAt": "2019-04-18T22:41:32Z" - } - } - } - ], - "phase": "Running", - "podIP": "10.16.2.226", - "qosClass": "Burstable", - "startTime": "2019-04-18T22:41:30Z" - } - } - e2e.go:86: Waiting for dumper-type1-source1 to be deleted - e2e.go:86: Cleaning resource: "dumper-testany-source1" - { - "apiVersion": "v1", - "kind": "Pod", - "metadata": { - 
"annotations": { - "sidecar.istio.io/inject": "true", - "sidecar.istio.io/status": "{\"version\":\"b159c540eed2383d13abb7a44e9519b1c51cf1b2fa469878e22a452dfa6b3ba9\",\"initContainers\":[\"istio-init\"],\"containers\":[\"istio-proxy\"],\"volumes\":[\"istio-envoy\",\"istio-certs\"],\"imagePullSecrets\":null}" - }, - "creationTimestamp": "2019-04-18T22:41:30Z", - "labels": { - "end2end-test-broker-trigger": "1bf8fe42-622b-11e9-8c26-acde48001122" - }, - "name": "dumper-testany-source1", - "namespace": "testdefaultbrokerwithmanytriggers-in-memory-channel", - "resourceVersion": "2720591", - "selfLink": "/api/v1/namespaces/testdefaultbrokerwithmanytriggers-in-memory-channel/pods/dumper-testany-source1", - "uid": "1c1b6ef9-622b-11e9-92f2-42010a8a0052" - }, - "spec": { - "containers": [ - { - "image": "us.gcr.io/akashv-public/logevents:latest", - "imagePullPolicy": "Always", - "name": "logevents", - "resources": {}, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "volumeMounts": [ - { - "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", - "name": "default-token-ch586", - "readOnly": true - } - ] - }, - { - "args": [ - "proxy", - "sidecar", - "--domain", - "$(POD_NAMESPACE).svc.cluster.local", - "--configPath", - "/etc/istio/proxy", - "--binaryPath", - "/usr/local/bin/envoy", - "--serviceCluster", - "dumper-testany-source1.testdefaultbrokerwithmanytriggers-in-memory-channel", - "--drainDuration", - "45s", - "--parentShutdownDuration", - "1m0s", - "--discoveryAddress", - "istio-pilot.istio-system:15010", - "--zipkinAddress", - "zipkin.istio-system:9411", - "--connectTimeout", - "10s", - "--proxyAdminPort", - "15000", - "--concurrency", - "2", - "--controlPlaneAuthPolicy", - "NONE", - "--statusPort", - "15020", - "--applicationPorts", - "" - ], - "env": [ - { - "name": "POD_NAME", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "metadata.name" - } - } - }, - { - "name": "POD_NAMESPACE", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "metadata.namespace" - } - } - }, - { - "name": "INSTANCE_IP", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "status.podIP" - } - } - }, - { - "name": "ISTIO_META_POD_NAME", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "metadata.name" - } - } - }, - { - "name": "ISTIO_META_CONFIG_NAMESPACE", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "metadata.namespace" - } - } - }, - { - "name": "ISTIO_META_INTERCEPTION_MODE", - "value": "REDIRECT" - }, - { - "name": "ISTIO_METAJSON_ANNOTATIONS", - "value": "{\"sidecar.istio.io/inject\":\"true\"}\n" - }, - { - "name": "ISTIO_METAJSON_LABELS", - "value": "{\"end2end-test-broker-trigger\":\"1bf8fe42-622b-11e9-8c26-acde48001122\"}\n" - } - ], - "image": "docker.io/istio/proxyv2:1.1.2", - "imagePullPolicy": "IfNotPresent", - "lifecycle": { - "preStop": { - "exec": { - "command": [ - "sleep", - "20" - ] - } - } - }, - "name": "istio-proxy", - "ports": [ - { - "containerPort": 15090, - "name": "http-envoy-prom", - "protocol": "TCP" - } - ], - "readinessProbe": { - "failureThreshold": 30, - "httpGet": { - "path": "/healthz/ready", - "port": 15020, - "scheme": "HTTP" - }, - "initialDelaySeconds": 1, - "periodSeconds": 2, - "successThreshold": 1, - "timeoutSeconds": 1 - }, - "resources": { - "limits": { - "cpu": "2", - "memory": "128Mi" - }, - "requests": { - "cpu": "100m", - "memory": "128Mi" - } - }, - "securityContext": { - "procMount": "Default", - 
"readOnlyRootFilesystem": true, - "runAsUser": 1337 - }, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "volumeMounts": [ - { - "mountPath": "/etc/istio/proxy", - "name": "istio-envoy" - }, - { - "mountPath": "/etc/certs/", - "name": "istio-certs", - "readOnly": true - }, - { - "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", - "name": "default-token-ch586", - "readOnly": true - } - ] - } - ], - "dnsPolicy": "ClusterFirst", - "initContainers": [ - { - "args": [ - "-p", - "15001", - "-u", - "1337", - "-m", - "REDIRECT", - "-i", - "*", - "-x", - "", - "-b", - "", - "-d", - "15020" - ], - "image": "docker.io/istio/proxy_init:1.1.2", - "imagePullPolicy": "IfNotPresent", - "name": "istio-init", - "resources": { - "limits": { - "cpu": "100m", - "memory": "50Mi" - }, - "requests": { - "cpu": "10m", - "memory": "10Mi" - } - }, - "securityContext": { - "capabilities": { - "add": [ - "NET_ADMIN" - ] - }, - "procMount": "Default" - }, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File" - } - ], - "nodeName": "gke-kn-dev-2-default-pool-85a58e81-p02p", - "priority": 0, - "restartPolicy": "Always", - "schedulerName": "default-scheduler", - "securityContext": {}, - "serviceAccount": "default", - "serviceAccountName": "default", - "terminationGracePeriodSeconds": 30, - "tolerations": [ - { - "effect": "NoExecute", - "key": "node.kubernetes.io/not-ready", - "operator": "Exists", - "tolerationSeconds": 300 - }, - { - "effect": "NoExecute", - "key": "node.kubernetes.io/unreachable", - "operator": "Exists", - "tolerationSeconds": 300 - } - ], - "volumes": [ - { - "name": "default-token-ch586", - "secret": { - "defaultMode": 420, - "secretName": "default-token-ch586" - } - }, - { - "emptyDir": { - "medium": "Memory" - }, - "name": "istio-envoy" - }, - { - "name": "istio-certs", - "secret": { - "defaultMode": 420, - "optional": true, - "secretName": "istio.default" - } - } - ] - }, - "status": { - "conditions": [ - { - "lastProbeTime": null, - "lastTransitionTime": "2019-04-18T22:41:33Z", - "status": "True", - "type": "Initialized" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-04-18T22:41:37Z", - "status": "True", - "type": "Ready" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-04-18T22:41:37Z", - "status": "True", - "type": "ContainersReady" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-04-18T22:41:30Z", - "status": "True", - "type": "PodScheduled" - } - ], - "containerStatuses": [ - { - "containerID": "docker://bec06e29847b5509223d93bbccdb92503c1a1d60c54eb51b095be304c9cd2518", - "image": "istio/proxyv2:1.1.2", - "imageID": "docker-pullable://istio/proxyv2@sha256:25ec45680ca9ef9bcba56d06d34c0a84ac0415966112e0a324924a61fb74a158", - "lastState": {}, - "name": "istio-proxy", - "ready": true, - "restartCount": 0, - "state": { - "running": { - "startedAt": "2019-04-18T22:41:34Z" - } - } - }, - { - "containerID": "docker://a33770524486244f7bd49e654702061c8490cdf952e7e91f07c7844a5dbf7059", - "image": "us.gcr.io/akashv-public/logevents:latest", - "imageID": "docker-pullable://us.gcr.io/akashv-public/logevents@sha256:94e704c729126cbe49c56bc2ab53cc4d1c7b322a3c1a7a8e46191c9a29984733", - "lastState": {}, - "name": "logevents", - "ready": true, - "restartCount": 0, - "state": { - "running": { - "startedAt": "2019-04-18T22:41:34Z" - } - } - } - ], - "hostIP": "10.138.0.49", - "initContainerStatuses": [ - { - "containerID": 
"docker://3e539094a5c53a7d658af2e798dd091203142cf0807c5a3244f86a4131c87e6d", - "image": "istio/proxy_init:1.1.2", - "imageID": "docker-pullable://istio/proxy_init@sha256:ae12e1cf004427d567ac3446f7a26c0e2b905ac1c7bf5a865b22e5a3749f81b5", - "lastState": {}, - "name": "istio-init", - "ready": true, - "restartCount": 0, - "state": { - "terminated": { - "containerID": "docker://3e539094a5c53a7d658af2e798dd091203142cf0807c5a3244f86a4131c87e6d", - "exitCode": 0, - "finishedAt": "2019-04-18T22:41:32Z", - "reason": "Completed", - "startedAt": "2019-04-18T22:41:31Z" - } - } - } - ], - "phase": "Running", - "podIP": "10.16.1.103", - "qosClass": "Burstable", - "startTime": "2019-04-18T22:41:30Z" - } - } - e2e.go:86: Waiting for dumper-testany-source1 to be deleted - e2e.go:86: Cleaning resource: "dumper-type1-testany" - { - "apiVersion": "v1", - "kind": "Pod", - "metadata": { - "annotations": { - "sidecar.istio.io/inject": "true", - "sidecar.istio.io/status": "{\"version\":\"b159c540eed2383d13abb7a44e9519b1c51cf1b2fa469878e22a452dfa6b3ba9\",\"initContainers\":[\"istio-init\"],\"containers\":[\"istio-proxy\"],\"volumes\":[\"istio-envoy\",\"istio-certs\"],\"imagePullSecrets\":null}" - }, - "creationTimestamp": "2019-04-18T22:41:30Z", - "labels": { - "end2end-test-broker-trigger": "1bf8fda2-622b-11e9-8c26-acde48001122" - }, - "name": "dumper-type1-testany", - "namespace": "testdefaultbrokerwithmanytriggers-in-memory-channel", - "resourceVersion": "2720631", - "selfLink": "/api/v1/namespaces/testdefaultbrokerwithmanytriggers-in-memory-channel/pods/dumper-type1-testany", - "uid": "1c10be8b-622b-11e9-92f2-42010a8a0052" - }, - "spec": { - "containers": [ - { - "image": "us.gcr.io/akashv-public/logevents:latest", - "imagePullPolicy": "Always", - "name": "logevents", - "resources": {}, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "volumeMounts": [ - { - "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", - "name": "default-token-ch586", - "readOnly": true - } - ] - }, - { - "args": [ - "proxy", - "sidecar", - "--domain", - "$(POD_NAMESPACE).svc.cluster.local", - "--configPath", - "/etc/istio/proxy", - "--binaryPath", - "/usr/local/bin/envoy", - "--serviceCluster", - "dumper-type1-testany.testdefaultbrokerwithmanytriggers-in-memory-channel", - "--drainDuration", - "45s", - "--parentShutdownDuration", - "1m0s", - "--discoveryAddress", - "istio-pilot.istio-system:15010", - "--zipkinAddress", - "zipkin.istio-system:9411", - "--connectTimeout", - "10s", - "--proxyAdminPort", - "15000", - "--concurrency", - "2", - "--controlPlaneAuthPolicy", - "NONE", - "--statusPort", - "15020", - "--applicationPorts", - "" - ], - "env": [ - { - "name": "POD_NAME", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "metadata.name" - } - } - }, - { - "name": "POD_NAMESPACE", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "metadata.namespace" - } - } - }, - { - "name": "INSTANCE_IP", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "status.podIP" - } - } - }, - { - "name": "ISTIO_META_POD_NAME", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "metadata.name" - } - } - }, - { - "name": "ISTIO_META_CONFIG_NAMESPACE", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "metadata.namespace" - } - } - }, - { - "name": "ISTIO_META_INTERCEPTION_MODE", - "value": "REDIRECT" - }, - { - "name": "ISTIO_METAJSON_ANNOTATIONS", - "value": "{\"sidecar.istio.io/inject\":\"true\"}\n" - }, - { 
- "name": "ISTIO_METAJSON_LABELS", - "value": "{\"end2end-test-broker-trigger\":\"1bf8fda2-622b-11e9-8c26-acde48001122\"}\n" - } - ], - "image": "docker.io/istio/proxyv2:1.1.2", - "imagePullPolicy": "IfNotPresent", - "lifecycle": { - "preStop": { - "exec": { - "command": [ - "sleep", - "20" - ] - } - } - }, - "name": "istio-proxy", - "ports": [ - { - "containerPort": 15090, - "name": "http-envoy-prom", - "protocol": "TCP" - } - ], - "readinessProbe": { - "failureThreshold": 30, - "httpGet": { - "path": "/healthz/ready", - "port": 15020, - "scheme": "HTTP" - }, - "initialDelaySeconds": 1, - "periodSeconds": 2, - "successThreshold": 1, - "timeoutSeconds": 1 - }, - "resources": { - "limits": { - "cpu": "2", - "memory": "128Mi" - }, - "requests": { - "cpu": "100m", - "memory": "128Mi" - } - }, - "securityContext": { - "procMount": "Default", - "readOnlyRootFilesystem": true, - "runAsUser": 1337 - }, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "volumeMounts": [ - { - "mountPath": "/etc/istio/proxy", - "name": "istio-envoy" - }, - { - "mountPath": "/etc/certs/", - "name": "istio-certs", - "readOnly": true - }, - { - "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", - "name": "default-token-ch586", - "readOnly": true - } - ] - } - ], - "dnsPolicy": "ClusterFirst", - "initContainers": [ - { - "args": [ - "-p", - "15001", - "-u", - "1337", - "-m", - "REDIRECT", - "-i", - "*", - "-x", - "", - "-b", - "", - "-d", - "15020" - ], - "image": "docker.io/istio/proxy_init:1.1.2", - "imagePullPolicy": "IfNotPresent", - "name": "istio-init", - "resources": { - "limits": { - "cpu": "100m", - "memory": "50Mi" - }, - "requests": { - "cpu": "10m", - "memory": "10Mi" - } - }, - "securityContext": { - "capabilities": { - "add": [ - "NET_ADMIN" - ] - }, - "procMount": "Default" - }, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File" - } - ], - "nodeName": "gke-kn-dev-2-default-pool-85a58e81-vx0d", - "priority": 0, - "restartPolicy": "Always", - "schedulerName": "default-scheduler", - "securityContext": {}, - "serviceAccount": "default", - "serviceAccountName": "default", - "terminationGracePeriodSeconds": 30, - "tolerations": [ - { - "effect": "NoExecute", - "key": "node.kubernetes.io/not-ready", - "operator": "Exists", - "tolerationSeconds": 300 - }, - { - "effect": "NoExecute", - "key": "node.kubernetes.io/unreachable", - "operator": "Exists", - "tolerationSeconds": 300 - } - ], - "volumes": [ - { - "name": "default-token-ch586", - "secret": { - "defaultMode": 420, - "secretName": "default-token-ch586" - } - }, - { - "emptyDir": { - "medium": "Memory" - }, - "name": "istio-envoy" - }, - { - "name": "istio-certs", - "secret": { - "defaultMode": 420, - "optional": true, - "secretName": "istio.default" - } - } - ] - }, - "status": { - "conditions": [ - { - "lastProbeTime": null, - "lastTransitionTime": "2019-04-18T22:41:33Z", - "status": "True", - "type": "Initialized" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-04-18T22:41:35Z", - "status": "True", - "type": "Ready" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-04-18T22:41:35Z", - "status": "True", - "type": "ContainersReady" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-04-18T22:41:30Z", - "status": "True", - "type": "PodScheduled" - } - ], - "containerStatuses": [ - { - "containerID": "docker://55aad11bf7668e66f2bf23e5fa11e6336e064244483e6b113472db9f1a374a1a", - "image": "istio/proxyv2:1.1.2", - "imageID": 
"docker-pullable://istio/proxyv2@sha256:25ec45680ca9ef9bcba56d06d34c0a84ac0415966112e0a324924a61fb74a158", - "lastState": {}, - "name": "istio-proxy", - "ready": true, - "restartCount": 0, - "state": { - "running": { - "startedAt": "2019-04-18T22:41:34Z" - } - } - }, - { - "containerID": "docker://eeaa40165b28fa4f1e4237cd56f5db2b283ddc904a6618562300a659a7f12b11", - "image": "us.gcr.io/akashv-public/logevents:latest", - "imageID": "docker-pullable://us.gcr.io/akashv-public/logevents@sha256:94e704c729126cbe49c56bc2ab53cc4d1c7b322a3c1a7a8e46191c9a29984733", - "lastState": {}, - "name": "logevents", - "ready": true, - "restartCount": 0, - "state": { - "running": { - "startedAt": "2019-04-18T22:41:34Z" - } - } - } - ], - "hostIP": "10.138.0.48", - "initContainerStatuses": [ - { - "containerID": "docker://fb15b01861d0b314e05bdbb0012bdc43b2d89b94c017f740879af52a2056e67f", - "image": "istio/proxy_init:1.1.2", - "imageID": "docker-pullable://istio/proxy_init@sha256:ae12e1cf004427d567ac3446f7a26c0e2b905ac1c7bf5a865b22e5a3749f81b5", - "lastState": {}, - "name": "istio-init", - "ready": true, - "restartCount": 0, - "state": { - "terminated": { - "containerID": "docker://fb15b01861d0b314e05bdbb0012bdc43b2d89b94c017f740879af52a2056e67f", - "exitCode": 0, - "finishedAt": "2019-04-18T22:41:32Z", - "reason": "Completed", - "startedAt": "2019-04-18T22:41:31Z" - } - } - } - ], - "phase": "Running", - "podIP": "10.16.2.225", - "qosClass": "Burstable", - "startTime": "2019-04-18T22:41:30Z" - } - } - e2e.go:86: Waiting for dumper-type1-testany to be deleted - e2e.go:86: Cleaning resource: "dumper-testany-testany" - { - "apiVersion": "v1", - "kind": "Pod", - "metadata": { - "annotations": { - "sidecar.istio.io/inject": "true", - "sidecar.istio.io/status": "{\"version\":\"b159c540eed2383d13abb7a44e9519b1c51cf1b2fa469878e22a452dfa6b3ba9\",\"initContainers\":[\"istio-init\"],\"containers\":[\"istio-proxy\"],\"volumes\":[\"istio-envoy\",\"istio-certs\"],\"imagePullSecrets\":null}" - }, - "creationTimestamp": "2019-04-18T22:41:30Z", - "labels": { - "end2end-test-broker-trigger": "1bf8fd16-622b-11e9-8c26-acde48001122" - }, - "name": "dumper-testany-testany", - "namespace": "testdefaultbrokerwithmanytriggers-in-memory-channel", - "resourceVersion": "2720630", - "selfLink": "/api/v1/namespaces/testdefaultbrokerwithmanytriggers-in-memory-channel/pods/dumper-testany-testany", - "uid": "1bfd7c46-622b-11e9-92f2-42010a8a0052" - }, - "spec": { - "containers": [ - { - "image": "us.gcr.io/akashv-public/logevents:latest", - "imagePullPolicy": "Always", - "name": "logevents", - "resources": {}, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "volumeMounts": [ - { - "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", - "name": "default-token-ch586", - "readOnly": true - } - ] - }, - { - "args": [ - "proxy", - "sidecar", - "--domain", - "$(POD_NAMESPACE).svc.cluster.local", - "--configPath", - "/etc/istio/proxy", - "--binaryPath", - "/usr/local/bin/envoy", - "--serviceCluster", - "dumper-testany-testany.testdefaultbrokerwithmanytriggers-in-memory-channel", - "--drainDuration", - "45s", - "--parentShutdownDuration", - "1m0s", - "--discoveryAddress", - "istio-pilot.istio-system:15010", - "--zipkinAddress", - "zipkin.istio-system:9411", - "--connectTimeout", - "10s", - "--proxyAdminPort", - "15000", - "--concurrency", - "2", - "--controlPlaneAuthPolicy", - "NONE", - "--statusPort", - "15020", - "--applicationPorts", - "" - ], - "env": [ - { - "name": "POD_NAME", - "valueFrom": { - 
"fieldRef": { - "apiVersion": "v1", - "fieldPath": "metadata.name" - } - } - }, - { - "name": "POD_NAMESPACE", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "metadata.namespace" - } - } - }, - { - "name": "INSTANCE_IP", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "status.podIP" - } - } - }, - { - "name": "ISTIO_META_POD_NAME", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "metadata.name" - } - } - }, - { - "name": "ISTIO_META_CONFIG_NAMESPACE", - "valueFrom": { - "fieldRef": { - "apiVersion": "v1", - "fieldPath": "metadata.namespace" - } - } - }, - { - "name": "ISTIO_META_INTERCEPTION_MODE", - "value": "REDIRECT" - }, - { - "name": "ISTIO_METAJSON_ANNOTATIONS", - "value": "{\"sidecar.istio.io/inject\":\"true\"}\n" - }, - { - "name": "ISTIO_METAJSON_LABELS", - "value": "{\"end2end-test-broker-trigger\":\"1bf8fd16-622b-11e9-8c26-acde48001122\"}\n" - } - ], - "image": "docker.io/istio/proxyv2:1.1.2", - "imagePullPolicy": "IfNotPresent", - "lifecycle": { - "preStop": { - "exec": { - "command": [ - "sleep", - "20" - ] - } - } - }, - "name": "istio-proxy", - "ports": [ - { - "containerPort": 15090, - "name": "http-envoy-prom", - "protocol": "TCP" - } - ], - "readinessProbe": { - "failureThreshold": 30, - "httpGet": { - "path": "/healthz/ready", - "port": 15020, - "scheme": "HTTP" - }, - "initialDelaySeconds": 1, - "periodSeconds": 2, - "successThreshold": 1, - "timeoutSeconds": 1 - }, - "resources": { - "limits": { - "cpu": "2", - "memory": "128Mi" - }, - "requests": { - "cpu": "100m", - "memory": "128Mi" - } - }, - "securityContext": { - "procMount": "Default", - "readOnlyRootFilesystem": true, - "runAsUser": 1337 - }, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "volumeMounts": [ - { - "mountPath": "/etc/istio/proxy", - "name": "istio-envoy" - }, - { - "mountPath": "/etc/certs/", - "name": "istio-certs", - "readOnly": true - }, - { - "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", - "name": "default-token-ch586", - "readOnly": true - } - ] - } - ], - "dnsPolicy": "ClusterFirst", - "initContainers": [ - { - "args": [ - "-p", - "15001", - "-u", - "1337", - "-m", - "REDIRECT", - "-i", - "*", - "-x", - "", - "-b", - "", - "-d", - "15020" - ], - "image": "docker.io/istio/proxy_init:1.1.2", - "imagePullPolicy": "IfNotPresent", - "name": "istio-init", - "resources": { - "limits": { - "cpu": "100m", - "memory": "50Mi" - }, - "requests": { - "cpu": "10m", - "memory": "10Mi" - } - }, - "securityContext": { - "capabilities": { - "add": [ - "NET_ADMIN" - ] - }, - "procMount": "Default" - }, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File" - } - ], - "nodeName": "gke-kn-dev-2-default-pool-85a58e81-k5z3", - "priority": 0, - "restartPolicy": "Always", - "schedulerName": "default-scheduler", - "securityContext": {}, - "serviceAccount": "default", - "serviceAccountName": "default", - "terminationGracePeriodSeconds": 30, - "tolerations": [ - { - "effect": "NoExecute", - "key": "node.kubernetes.io/not-ready", - "operator": "Exists", - "tolerationSeconds": 300 - }, - { - "effect": "NoExecute", - "key": "node.kubernetes.io/unreachable", - "operator": "Exists", - "tolerationSeconds": 300 - } - ], - "volumes": [ - { - "name": "default-token-ch586", - "secret": { - "defaultMode": 420, - "secretName": "default-token-ch586" - } - }, - { - "emptyDir": { - "medium": "Memory" - }, - "name": "istio-envoy" - }, - { - "name": "istio-certs", - "secret": 
{ - "defaultMode": 420, - "optional": true, - "secretName": "istio.default" - } - } - ] - }, - "status": { - "conditions": [ - { - "lastProbeTime": null, - "lastTransitionTime": "2019-04-18T22:41:35Z", - "status": "True", - "type": "Initialized" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-04-18T22:41:38Z", - "status": "True", - "type": "Ready" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-04-18T22:41:38Z", - "status": "True", - "type": "ContainersReady" - }, - { - "lastProbeTime": null, - "lastTransitionTime": "2019-04-18T22:41:30Z", - "status": "True", - "type": "PodScheduled" - } - ], - "containerStatuses": [ - { - "containerID": "docker://96cba72633dbee4b8071dc44f2dbf66342233261567d8672335ca5af3b40f913", - "image": "istio/proxyv2:1.1.2", - "imageID": "docker-pullable://istio/proxyv2@sha256:25ec45680ca9ef9bcba56d06d34c0a84ac0415966112e0a324924a61fb74a158", - "lastState": {}, - "name": "istio-proxy", - "ready": true, - "restartCount": 0, - "state": { - "running": { - "startedAt": "2019-04-18T22:41:36Z" - } - } - }, - { - "containerID": "docker://cb01a522bba5ad2fc569bcddc208880f41d6eddf112c8d7f669646159e4ae571", - "image": "us.gcr.io/akashv-public/logevents:latest", - "imageID": "docker-pullable://us.gcr.io/akashv-public/logevents@sha256:94e704c729126cbe49c56bc2ab53cc4d1c7b322a3c1a7a8e46191c9a29984733", - "lastState": {}, - "name": "logevents", - "ready": true, - "restartCount": 0, - "state": { - "running": { - "startedAt": "2019-04-18T22:41:36Z" - } - } - } - ], - "hostIP": "10.138.0.50", - "initContainerStatuses": [ - { - "containerID": "docker://6020efded88dba79d4504bbd66e4710bb2015bcda78e6159edf8102cab6ca013", - "image": "istio/proxy_init:1.1.2", - "imageID": "docker-pullable://istio/proxy_init@sha256:ae12e1cf004427d567ac3446f7a26c0e2b905ac1c7bf5a865b22e5a3749f81b5", - "lastState": {}, - "name": "istio-init", - "ready": true, - "restartCount": 0, - "state": { - "terminated": { - "containerID": "docker://6020efded88dba79d4504bbd66e4710bb2015bcda78e6159edf8102cab6ca013", - "exitCode": 0, - "finishedAt": "2019-04-18T22:41:34Z", - "reason": "Completed", - "startedAt": "2019-04-18T22:41:33Z" - } - } - } - ], - "phase": "Running", - "podIP": "10.16.0.51", - "qosClass": "Burstable", - "startTime": "2019-04-18T22:41:30Z" - } - } - e2e.go:86: Waiting for dumper-testany-testany to be deleted -PASS -ok github.com/knative/eventing/test/e2e 300.673s -Finished run, return code is 0 -XML report written to /var/folders/1w/8r5wqmss4gvg7n9xbl5z219800l4st/T/tmp.8lLTp3Ih/junit_XXXXXXXX.xml -Test log written to /var/folders/1w/8r5wqmss4gvg7n9xbl5z219800l4st/T/tmp.8lLTp3Ih/go_test_XXXXXXXX.log -************************************** -*** E2E TESTS PASSED *** -************************************** -======================================= -==== TEARING DOWN TEST ENVIRONMENT ==== -======================================= diff --git a/contrib/gcppubsub/config/gcppubsub.yaml b/contrib/gcppubsub/config/gcppubsub.yaml index 33382e241bb..6d462004be2 100644 --- a/contrib/gcppubsub/config/gcppubsub.yaml +++ b/contrib/gcppubsub/config/gcppubsub.yaml @@ -61,6 +61,14 @@ rules: verbs: - create - update + - apiGroups: + - "" # Core API Group. 
+ resources: + - events + verbs: + - create + - patch + - update --- apiVersion: rbac.authorization.k8s.io/v1 diff --git a/contrib/gcppubsub/pkg/controller/channel/reconcile.go b/contrib/gcppubsub/pkg/controller/channel/reconcile.go index 929688819b8..1def46d7fb9 100644 --- a/contrib/gcppubsub/pkg/controller/channel/reconcile.go +++ b/contrib/gcppubsub/pkg/controller/channel/reconcile.go @@ -20,17 +20,15 @@ import ( "context" "fmt" - "github.com/knative/eventing/pkg/apis/duck/v1alpha1" - ccpcontroller "github.com/knative/eventing/contrib/gcppubsub/pkg/controller/clusterchannelprovisioner" pubsubutil "github.com/knative/eventing/contrib/gcppubsub/pkg/util" + "github.com/knative/eventing/pkg/apis/duck/v1alpha1" eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" "github.com/knative/eventing/pkg/logging" util "github.com/knative/eventing/pkg/provisioners" "github.com/knative/eventing/pkg/reconciler/names" "go.uber.org/zap" "golang.org/x/oauth2/google" - "k8s.io/api/core/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/errors" @@ -353,7 +351,7 @@ func (r *reconciler) planGcpResources(ctx context.Context, c *eventingv1alpha1.C return persist, newPCS, subsToSync, nil } -func (r *reconciler) createK8sService(ctx context.Context, c *eventingv1alpha1.Channel) (*v1.Service, error) { +func (r *reconciler) createK8sService(ctx context.Context, c *eventingv1alpha1.Channel) (*corev1.Service, error) { svc, err := util.CreateK8sService(ctx, r.client, c, util.ExternalService(c)) if err != nil { logging.FromContext(ctx).Info("Error creating the Channel's K8s Service", zap.Error(err)) diff --git a/contrib/gcppubsub/pkg/controller/channel/reconcile_test.go b/contrib/gcppubsub/pkg/controller/channel/reconcile_test.go index d4bdebae22a..1a6dd3db175 100644 --- a/contrib/gcppubsub/pkg/controller/channel/reconcile_test.go +++ b/contrib/gcppubsub/pkg/controller/channel/reconcile_test.go @@ -22,15 +22,10 @@ import ( "fmt" "testing" - "k8s.io/apimachinery/pkg/types" - pubsubutil "github.com/knative/eventing/contrib/gcppubsub/pkg/util" - - "github.com/knative/eventing/pkg/apis/duck/v1alpha1" - - "github.com/knative/eventing/contrib/gcppubsub/pkg/util/testcreds" - "github.com/knative/eventing/contrib/gcppubsub/pkg/util/fakepubsub" + "github.com/knative/eventing/contrib/gcppubsub/pkg/util/testcreds" + "github.com/knative/eventing/pkg/apis/duck/v1alpha1" eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" util "github.com/knative/eventing/pkg/provisioners" "github.com/knative/eventing/pkg/reconciler/names" @@ -42,6 +37,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes/scheme" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" diff --git a/contrib/gcppubsub/pkg/controller/cmd/main.go b/contrib/gcppubsub/pkg/controller/cmd/main.go index 2391723361e..ce4961befc5 100644 --- a/contrib/gcppubsub/pkg/controller/cmd/main.go +++ b/contrib/gcppubsub/pkg/controller/cmd/main.go @@ -21,14 +21,13 @@ import ( "log" "os" - "github.com/knative/eventing/pkg/provisioners" - v1 "k8s.io/api/core/v1" - "github.com/knative/eventing/contrib/gcppubsub/pkg/controller/channel" "github.com/knative/eventing/contrib/gcppubsub/pkg/controller/clusterchannelprovisioner" eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" + 
"github.com/knative/eventing/pkg/provisioners" "github.com/knative/pkg/signals" "go.uber.org/zap" + v1 "k8s.io/api/core/v1" "sigs.k8s.io/controller-runtime/pkg/client/config" "sigs.k8s.io/controller-runtime/pkg/manager" ) diff --git a/contrib/gcppubsub/pkg/dispatcher/cmd/main.go b/contrib/gcppubsub/pkg/dispatcher/cmd/main.go index 830a6da1746..a782ade4fa1 100644 --- a/contrib/gcppubsub/pkg/dispatcher/cmd/main.go +++ b/contrib/gcppubsub/pkg/dispatcher/cmd/main.go @@ -20,8 +20,6 @@ import ( "context" "flag" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - "github.com/knative/eventing/contrib/gcppubsub/pkg/controller/clusterchannelprovisioner" "github.com/knative/eventing/contrib/gcppubsub/pkg/dispatcher/dispatcher" "github.com/knative/eventing/contrib/gcppubsub/pkg/dispatcher/receiver" @@ -32,6 +30,7 @@ import ( "go.uber.org/zap" "sigs.k8s.io/controller-runtime/pkg/client/config" "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/reconcile" ) // This is the main method for the GCP PubSub Channel dispatcher. It handles all the data-plane diff --git a/contrib/gcppubsub/pkg/dispatcher/dispatcher/controller.go b/contrib/gcppubsub/pkg/dispatcher/dispatcher/controller.go index cdc3c83d934..d2475a634ec 100644 --- a/contrib/gcppubsub/pkg/dispatcher/dispatcher/controller.go +++ b/contrib/gcppubsub/pkg/dispatcher/dispatcher/controller.go @@ -21,15 +21,13 @@ import ( "sync" "time" - "k8s.io/client-go/util/workqueue" - - "sigs.k8s.io/controller-runtime/pkg/event" - pubsubutil "github.com/knative/eventing/contrib/gcppubsub/pkg/util" eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" "github.com/knative/eventing/pkg/provisioners" "go.uber.org/zap" + "k8s.io/client-go/util/workqueue" "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/source" diff --git a/contrib/gcppubsub/pkg/dispatcher/dispatcher/reconcile.go b/contrib/gcppubsub/pkg/dispatcher/dispatcher/reconcile.go index 41f264ad559..06ba607b401 100644 --- a/contrib/gcppubsub/pkg/dispatcher/dispatcher/reconcile.go +++ b/contrib/gcppubsub/pkg/dispatcher/dispatcher/reconcile.go @@ -23,20 +23,17 @@ import ( "time" "github.com/knative/eventing/contrib/gcppubsub/pkg/controller/channel" - - v1 "k8s.io/api/core/v1" - - "k8s.io/client-go/util/workqueue" - pubsubutil "github.com/knative/eventing/contrib/gcppubsub/pkg/util" eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" "github.com/knative/eventing/pkg/logging" "github.com/knative/eventing/pkg/provisioners" util "github.com/knative/eventing/pkg/provisioners" "go.uber.org/zap" + v1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/workqueue" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/reconcile" diff --git a/contrib/gcppubsub/pkg/dispatcher/dispatcher/reconcile_test.go b/contrib/gcppubsub/pkg/dispatcher/dispatcher/reconcile_test.go index adb624df783..c23d1cd8305 100644 --- a/contrib/gcppubsub/pkg/dispatcher/dispatcher/reconcile_test.go +++ b/contrib/gcppubsub/pkg/dispatcher/dispatcher/reconcile_test.go @@ -26,31 +26,25 @@ import ( "testing" "time" - "k8s.io/client-go/util/workqueue" - "github.com/knative/eventing/contrib/gcppubsub/pkg/util" - - 
"github.com/knative/eventing/pkg/provisioners" - - "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - + "github.com/knative/eventing/contrib/gcppubsub/pkg/util/fakepubsub" "github.com/knative/eventing/contrib/gcppubsub/pkg/util/testcreds" "github.com/knative/eventing/pkg/apis/duck/v1alpha1" - "github.com/knative/eventing/pkg/utils" - - "github.com/knative/eventing/contrib/gcppubsub/pkg/util/fakepubsub" - eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" + "github.com/knative/eventing/pkg/provisioners" controllertesting "github.com/knative/eventing/pkg/reconciler/testing" + "github.com/knative/eventing/pkg/utils" _ "github.com/knative/pkg/system/testing" "go.uber.org/zap" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/util/workqueue" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/reconcile" ) const ( diff --git a/contrib/gcppubsub/pkg/dispatcher/receiver/receiver.go b/contrib/gcppubsub/pkg/dispatcher/receiver/receiver.go index c0e4381ab67..7b79f6199f5 100644 --- a/contrib/gcppubsub/pkg/dispatcher/receiver/receiver.go +++ b/contrib/gcppubsub/pkg/dispatcher/receiver/receiver.go @@ -23,13 +23,12 @@ import ( "sync" "sync/atomic" - "github.com/knative/eventing/pkg/channelwatcher" - "cloud.google.com/go/pubsub" "github.com/knative/eventing/contrib/gcppubsub/pkg/controller/channel" "github.com/knative/eventing/contrib/gcppubsub/pkg/dispatcher/receiver/cache" "github.com/knative/eventing/contrib/gcppubsub/pkg/util" eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" + "github.com/knative/eventing/pkg/channelwatcher" "github.com/knative/eventing/pkg/logging" "github.com/knative/eventing/pkg/provisioners" "go.uber.org/zap" @@ -79,7 +78,7 @@ func (r *Receiver) getChannelReferenceFromHost(host string) (provisioners.Channe chMap := r.getHostToChannelMap() cr, ok := chMap[host] if !ok { - return cr, fmt.Errorf("Invalid HostName:%s. HostName not found in any of the watched natss channels", host) + return cr, fmt.Errorf("Invalid HostName:%s. HostName not found in any of the watched gcp-pubsub channels", host) } return cr, nil } @@ -181,7 +180,7 @@ func (r *Receiver) setHostToChannelMap(hcMap map[string]provisioners.ChannelRefe r.hostToChannelMap.Store(hcMap) } -// UpdateHostToChannelMap will be called from the controller that watches natss channels. +// UpdateHostToChannelMap will be called from the controller that watches gcp-pubsub channels. // It will update internal hostToChannelMap which is used to resolve the hostHeader of the // incoming request to the correct ChannelReference in the receiver function. 
func (r *Receiver) UpdateHostToChannelMap(ctx context.Context) error { diff --git a/contrib/gcppubsub/pkg/dispatcher/receiver/receiver_test.go b/contrib/gcppubsub/pkg/dispatcher/receiver/receiver_test.go index 395ac64b709..f850d26049a 100644 --- a/contrib/gcppubsub/pkg/dispatcher/receiver/receiver_test.go +++ b/contrib/gcppubsub/pkg/dispatcher/receiver/receiver_test.go @@ -25,24 +25,19 @@ import ( "testing" "github.com/google/go-cmp/cmp" - "github.com/knative/eventing/contrib/gcppubsub/pkg/util" + "github.com/knative/eventing/contrib/gcppubsub/pkg/util/fakepubsub" + "github.com/knative/eventing/contrib/gcppubsub/pkg/util/testcreds" eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" "github.com/knative/eventing/pkg/provisioners" - corev1 "k8s.io/api/core/v1" - "k8s.io/client-go/kubernetes/scheme" - - "github.com/knative/eventing/contrib/gcppubsub/pkg/util/fakepubsub" + controllertesting "github.com/knative/eventing/pkg/reconciler/testing" "go.uber.org/zap" - + corev1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - + "k8s.io/client-go/kubernetes/scheme" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" - - "github.com/knative/eventing/contrib/gcppubsub/pkg/util/testcreds" - controllertesting "github.com/knative/eventing/pkg/reconciler/testing" ) const ( diff --git a/contrib/natss/pkg/dispatcher/dispatcher/dispatcher.go b/contrib/natss/pkg/dispatcher/dispatcher/dispatcher.go index 21a21f5bf04..65539da4326 100644 --- a/contrib/natss/pkg/dispatcher/dispatcher/dispatcher.go +++ b/contrib/natss/pkg/dispatcher/dispatcher/dispatcher.go @@ -313,19 +313,10 @@ func (s *SubscriptionsSupervisor) setHostToChannelMap(hcMap map[string]provision // It will update internal hostToChannelMap which is used to resolve the hostHeader of the // incoming request to the correct ChannelReference in the receiver function. func (s *SubscriptionsSupervisor) UpdateHostToChannelMap(ctx context.Context, chanList []eventingv1alpha1.Channel) error { - hostToChanMap := make(map[string]provisioners.ChannelReference, len(chanList)) - for _, c := range chanList { - hostName := c.Status.Address.Hostname - if cr, ok := hostToChanMap[hostName]; ok { - return fmt.Errorf( - "Duplicate hostName found. Each channel must have a unique host header. 
HostName:%s, channel:%s.%s, channel:%s.%s", - hostName, - c.Namespace, - c.Name, - cr.Namespace, - cr.Name) - } - hostToChanMap[hostName] = provisioners.ChannelReference{Name: c.Name, Namespace: c.Namespace} + hostToChanMap, err := provisioners.NewHostNameToChannelRefMap(chanList) + if err != nil { + logging.FromContext(ctx).Info("UpdateHostToChannelMap: Error occured when creating the new hostToChannel map.", zap.Error(err)) + return err } s.setHostToChannelMap(hostToChanMap) logging.FromContext(ctx).Info("hostToChannelMap updated successfully.") From 34aa1ba2b7941da4480232661042dc3826d74415 Mon Sep 17 00:00:00 2001 From: Akash Verenkar Date: Thu, 25 Apr 2019 10:41:39 -0700 Subject: [PATCH 36/37] Final changes before validating E2E tests --- config/200-controller-clusterrole.yaml | 7 ------- contrib/gcppubsub/config/gcppubsub.yaml | 3 ++- contrib/gcppubsub/pkg/dispatcher/receiver/receiver.go | 2 +- contrib/natss/pkg/dispatcher/dispatcher/dispatcher.go | 3 +-- pkg/provisioners/inmemory/channel/reconcile_test.go | 2 -- pkg/provisioners/inmemory/controller/main.go | 2 -- 6 files changed, 4 insertions(+), 15 deletions(-) diff --git a/config/200-controller-clusterrole.yaml b/config/200-controller-clusterrole.yaml index 44a8d3f1408..23437df3f5f 100644 --- a/config/200-controller-clusterrole.yaml +++ b/config/200-controller-clusterrole.yaml @@ -35,13 +35,6 @@ rules: - "patch" - "watch" - # Channels and Triggers both manipulate VirtualServices. - - apiGroups: - - "networking.istio.io" - resources: - - "virtualservices" - verbs: *everything - # Brokers and the namespace annotation controllers manipulate Deployments. - apiGroups: - "apps" diff --git a/contrib/gcppubsub/config/gcppubsub.yaml b/contrib/gcppubsub/config/gcppubsub.yaml index 6d462004be2..8737d3ced13 100644 --- a/contrib/gcppubsub/config/gcppubsub.yaml +++ b/contrib/gcppubsub/config/gcppubsub.yaml @@ -64,11 +64,12 @@ rules: - apiGroups: - "" # Core API Group.
resources: - - events + - events verbs: - create - patch - update + --- apiVersion: rbac.authorization.k8s.io/v1 diff --git a/contrib/gcppubsub/pkg/dispatcher/receiver/receiver.go b/contrib/gcppubsub/pkg/dispatcher/receiver/receiver.go index 7b79f6199f5..35952c0f75f 100644 --- a/contrib/gcppubsub/pkg/dispatcher/receiver/receiver.go +++ b/contrib/gcppubsub/pkg/dispatcher/receiver/receiver.go @@ -72,7 +72,7 @@ func (r *Receiver) newMessageReceiver() (*provisioners.MessageReceiver, error) { return provisioners.NewMessageReceiver( r.sendEventToTopic, r.logger.Sugar(), - provisioners.ResolveChannelFromHostHeader(provisioners.ResolveChannelFromHostFunc(r.getChannelReferenceFromHost))) + provisioners.ResolveChannelFromHostHeader(r.getChannelReferenceFromHost)) } func (r *Receiver) getChannelReferenceFromHost(host string) (provisioners.ChannelReference, error) { chMap := r.getHostToChannelMap() diff --git a/contrib/natss/pkg/dispatcher/dispatcher/dispatcher.go b/contrib/natss/pkg/dispatcher/dispatcher/dispatcher.go index 65539da4326..8d900564b63 100644 --- a/contrib/natss/pkg/dispatcher/dispatcher/dispatcher.go +++ b/contrib/natss/pkg/dispatcher/dispatcher/dispatcher.go @@ -76,6 +76,7 @@ func NewDispatcher(natssURL, clusterID string, logger *zap.Logger) (*Subscriptio clusterID: clusterID, subscriptions: make(map[provisioners.ChannelReference]map[subscriptionReference]*stan.Subscription), } + d.setHostToChannelMap(map[string]provisioners.ChannelReference{}) receiver, err := provisioners.NewMessageReceiver( createReceiverFunction(d, logger.Sugar()), logger.Sugar(), @@ -84,8 +85,6 @@ func NewDispatcher(natssURL, clusterID string, logger *zap.Logger) (*Subscriptio return nil, err } d.receiver = receiver - d.setHostToChannelMap(map[string]provisioners.ChannelReference{}) - return d, nil } diff --git a/pkg/provisioners/inmemory/channel/reconcile_test.go b/pkg/provisioners/inmemory/channel/reconcile_test.go index 76d75f3c7aa..e5af96d4dc2 100644 --- a/pkg/provisioners/inmemory/channel/reconcile_test.go +++ b/pkg/provisioners/inmemory/channel/reconcile_test.go @@ -32,7 +32,6 @@ import ( "github.com/knative/eventing/pkg/sidecar/fanout" "github.com/knative/eventing/pkg/sidecar/multichannelfanout" "github.com/knative/eventing/pkg/utils" - istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3" "github.com/knative/pkg/system" _ "github.com/knative/pkg/system/testing" "go.uber.org/zap" @@ -191,7 +190,6 @@ func init() { // Add types to scheme. _ = eventingv1alpha1.AddToScheme(scheme.Scheme) _ = corev1.AddToScheme(scheme.Scheme) - _ = istiov1alpha3.AddToScheme(scheme.Scheme) } func TestInjectClient(t *testing.T) { diff --git a/pkg/provisioners/inmemory/controller/main.go b/pkg/provisioners/inmemory/controller/main.go index 2b09c992b4f..6a1921d7c31 100644 --- a/pkg/provisioners/inmemory/controller/main.go +++ b/pkg/provisioners/inmemory/controller/main.go @@ -24,7 +24,6 @@ import ( eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" "github.com/knative/eventing/pkg/provisioners" "github.com/knative/eventing/pkg/provisioners/inmemory/clusterchannelprovisioner" - istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3" "github.com/knative/pkg/signals" "go.uber.org/zap" "sigs.k8s.io/controller-runtime/pkg/client/config" @@ -50,7 +49,6 @@ func main() { // Add custom types to this array to get them into the manager's scheme. 
eventingv1alpha1.AddToScheme(mgr.GetScheme()) - istiov1alpha3.AddToScheme(mgr.GetScheme()) // The controllers for both the ClusterChannelProvisioner and the Channels created by that // ClusterChannelProvisioner run in this process. From 9724578136dac2e5566b212d49e8d531b1cc2297 Mon Sep 17 00:00:00 2001 From: Akash Verenkar Date: Fri, 26 Apr 2019 10:10:14 -0700 Subject: [PATCH 37/37] Updates based on PR comments --- contrib/gcppubsub/config/gcppubsub.yaml | 17 +++++++++++++++++ .../pkg/controller/channel/reconcile.go | 8 ++++---- contrib/gcppubsub/pkg/dispatcher/cmd/main.go | 2 +- .../pkg/dispatcher/dispatcher/controller.go | 2 +- .../pkg/dispatcher/dispatcher/reconcile.go | 6 +++--- .../pkg/dispatcher/dispatcher/reconcile_test.go | 2 +- .../pkg/dispatcher/receiver/receiver.go | 10 +++++----- 7 files changed, 32 insertions(+), 15 deletions(-) diff --git a/contrib/gcppubsub/config/gcppubsub.yaml b/contrib/gcppubsub/config/gcppubsub.yaml index 8737d3ced13..a1807aa9851 100644 --- a/contrib/gcppubsub/config/gcppubsub.yaml +++ b/contrib/gcppubsub/config/gcppubsub.yaml @@ -233,3 +233,20 @@ spec: - protocol: TCP port: 80 targetPort: 8080 + +--- +# Needed by the GCP PubSub Channel to communicate with GCP PubSub. +apiVersion: networking.istio.io/v1alpha3 +kind: ServiceEntry +metadata: + name: gcppubsub-bus-ext + namespace: knative-eventing +spec: + hosts: + - "*.googleapis.com" + - "accounts.google.com" + ports: + - number: 443 + name: https + protocol: HTTPS + location: MESH_EXTERNAL diff --git a/contrib/gcppubsub/pkg/controller/channel/reconcile.go b/contrib/gcppubsub/pkg/controller/channel/reconcile.go index 1def46d7fb9..42c9d5c6522 100644 --- a/contrib/gcppubsub/pkg/controller/channel/reconcile.go +++ b/contrib/gcppubsub/pkg/controller/channel/reconcile.go @@ -161,10 +161,10 @@ func ShouldReconcile(c *eventingv1alpha1.Channel) bool { func (r *reconciler) reconcile(ctx context.Context, c *eventingv1alpha1.Channel) (bool, error) { c.Status.InitializeConditions() - // We are syncing four things: - // 1. The K8s Service to talk to this Channel. - // 2. The GCP PubSub Topic (one for the Channel). - // 3. The GCP PubSub Subscriptions (one for each Subscriber of the Channel). + // We are syncing the following: + // - The K8s Service to talk to this Channel. + // - The GCP PubSub Topic (one for the Channel). + // - The GCP PubSub Subscriptions (one for each Subscriber of the Channel). // First we will plan all the names out for steps 3 and 4 persist them to status.internal. Then, on a // subsequent reconcile, we manipulate all the GCP resources in steps 3 and 4. 
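The ServiceEntry added above opens MESH_EXTERNAL egress to *.googleapis.com and accounts.google.com over HTTPS, so that sidecar-injected gcp-pubsub pods can reach the PubSub and OAuth endpoints from inside the Istio mesh. The comment block in reconcile.go describes a plan-then-act reconcile: resource names are planned and persisted to status.internal first, and a later pass creates the actual Topic and Subscriptions. The Go sketch below only illustrates that idempotent pattern under assumed names; plannedState, planNames, persistStatus, and ensureGcpResources are hypothetical stand-ins, not functions from this patch.

package main

import (
	"context"
	"fmt"
)

// plannedState stands in for the names recorded in status.internal.
type plannedState struct {
	Topic         string
	Subscriptions []string
}

// planNames derives deterministic names for a Channel's GCP resources.
func planNames(namespace, name string, subscribers int) plannedState {
	ps := plannedState{Topic: fmt.Sprintf("knative-%s-%s", namespace, name)}
	for i := 0; i < subscribers; i++ {
		ps.Subscriptions = append(ps.Subscriptions, fmt.Sprintf("%s-sub-%d", ps.Topic, i))
	}
	return ps
}

// persistStatus records the plan before anything is created, so a crash
// between planning and acting never leaves unnamed GCP resources behind.
// In the real controller this would be a status update through the K8s client.
func persistStatus(ctx context.Context, ps plannedState) error {
	fmt.Println("persisting plan:", ps)
	return nil
}

// ensureGcpResources creates the Topic and Subscriptions named in the plan.
// Because the names were persisted first, this step is safe to retry.
func ensureGcpResources(ctx context.Context, ps plannedState) error {
	fmt.Println("ensuring topic", ps.Topic, "and", len(ps.Subscriptions), "subscriptions")
	return nil
}

func main() {
	ctx := context.Background()
	plan := planNames("default", "my-channel", 2)
	if err := persistStatus(ctx, plan); err != nil {
		panic(err)
	}
	if err := ensureGcpResources(ctx, plan); err != nil {
		panic(err)
	}
}

Persisting the plan before touching GCP means a controller restart between the two phases can re-read status.internal and retry, rather than guessing which resources already exist.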
diff --git a/contrib/gcppubsub/pkg/dispatcher/cmd/main.go b/contrib/gcppubsub/pkg/dispatcher/cmd/main.go
index a782ade4fa1..4b03eb258c3 100644
--- a/contrib/gcppubsub/pkg/dispatcher/cmd/main.go
+++ b/contrib/gcppubsub/pkg/dispatcher/cmd/main.go
@@ -77,7 +77,7 @@ func main() {
 	if _, err = dispatcher.New(
 		mgr,
 		logger.Desugar(),
-		[]dispatcher.ReconcileHandlers{
+		[]dispatcher.ReconcileHandler{
 			func(ctx context.Context, _ reconcile.Request) error {
 				return receiver.UpdateHostToChannelMap(ctx)
 			},
diff --git a/contrib/gcppubsub/pkg/dispatcher/dispatcher/controller.go b/contrib/gcppubsub/pkg/dispatcher/dispatcher/controller.go
index d2475a634ec..781ea63b6b9 100644
--- a/contrib/gcppubsub/pkg/dispatcher/dispatcher/controller.go
+++ b/contrib/gcppubsub/pkg/dispatcher/dispatcher/controller.go
@@ -51,7 +51,7 @@ const (
 // New returns a Controller that represents the dispatcher portion (messages from GCP PubSub are
 // sent into the cluster) of the GCP PubSub dispatcher. We use a reconcile loop to watch all
 // Channels and notice changes to them. It uses an exponential backoff to throttle the retries.
-func New(mgr manager.Manager, logger *zap.Logger, additionalHandlers []ReconcileHandlers) (controller.Controller, error) {
+func New(mgr manager.Manager, logger *zap.Logger, additionalHandlers []ReconcileHandler) (controller.Controller, error) {
 	// reconcileChan is used when the dispatcher itself needs to force reconciliation of a Channel.
 	reconcileChan := make(chan event.GenericEvent)
diff --git a/contrib/gcppubsub/pkg/dispatcher/dispatcher/reconcile.go b/contrib/gcppubsub/pkg/dispatcher/dispatcher/reconcile.go
index 06ba607b401..78c51756373 100644
--- a/contrib/gcppubsub/pkg/dispatcher/dispatcher/reconcile.go
+++ b/contrib/gcppubsub/pkg/dispatcher/dispatcher/reconcile.go
@@ -52,8 +52,8 @@ type channelName = types.NamespacedName
 type subscriptionName = types.UID
 type empty struct{}
 
-// ReconcileHandlers will be run by in addition to exiting reconcile
-type ReconcileHandlers func(context.Context, reconcile.Request) error
+// ReconcileHandler will be run in addition to the existing reconcile logic.
+type ReconcileHandler func(context.Context, reconcile.Request) error
 
 // reconciler reconciles Channels with the gcp-pubsub provisioner. It sets up hanging polling for
 // every Subscription to any Channel.
@@ -78,7 +78,7 @@ type reconciler struct {
 	// rateLimiter is used to limit the pace at which we nack a message when it could not be dispatched.
 	rateLimiter workqueue.RateLimiter
-	additionalHandlers []ReconcileHandlers
+	additionalHandlers []ReconcileHandler
 }
 
 // Verify the struct implements reconcile.Reconciler
diff --git a/contrib/gcppubsub/pkg/dispatcher/dispatcher/reconcile_test.go b/contrib/gcppubsub/pkg/dispatcher/dispatcher/reconcile_test.go
index c23d1cd8305..6814dc6448e 100644
--- a/contrib/gcppubsub/pkg/dispatcher/dispatcher/reconcile_test.go
+++ b/contrib/gcppubsub/pkg/dispatcher/dispatcher/reconcile_test.go
@@ -427,7 +427,7 @@ func TestReconcile(t *testing.T) {
 			}
 		}
 		if tc.OtherTestData[additionalHandlerError] != nil {
-			r.additionalHandlers = []ReconcileHandlers{
+			r.additionalHandlers = []ReconcileHandler{
 				func(_ context.Context, _ reconcile.Request) error {
 					return fmt.Errorf(tc.OtherTestData[additionalHandlerError].(string))
 				},
diff --git a/contrib/gcppubsub/pkg/dispatcher/receiver/receiver.go b/contrib/gcppubsub/pkg/dispatcher/receiver/receiver.go
index 35952c0f75f..09117122e46 100644
--- a/contrib/gcppubsub/pkg/dispatcher/receiver/receiver.go
+++ b/contrib/gcppubsub/pkg/dispatcher/receiver/receiver.go
@@ -78,7 +78,7 @@ func (r *Receiver) getChannelReferenceFromHost(host string) (provisioners.Channe
 	chMap := r.getHostToChannelMap()
 	cr, ok := chMap[host]
 	if !ok {
-		return cr, fmt.Errorf("Invalid HostName:%s. HostName not found in any of the watched gcp-pubsub channels", host)
+		return cr, fmt.Errorf("Invalid HostName:%q. HostName not found in any of the watched gcp-pubsub channels", host)
 	}
 	return cr, nil
 }
@@ -184,10 +184,10 @@ func (r *Receiver) setHostToChannelMap(hcMap map[string]provisioners.ChannelRefe
 // It will update internal hostToChannelMap which is used to resolve the hostHeader of the
 // incoming request to the correct ChannelReference in the receiver function.
 func (r *Receiver) UpdateHostToChannelMap(ctx context.Context) error {
-	logging.FromContext(ctx).Info("UpdateHostToChannelMap: Acquiring mutex lock")
+	logging.FromContext(ctx).Debug("UpdateHostToChannelMap: Acquiring mutex lock")
 	r.hostToChannelMapMutex.Lock()
 	defer r.hostToChannelMapMutex.Unlock()
-	logging.FromContext(ctx).Info("UpdateHostToChannelMap: Acquired mutex lock. Updating internal map")
+	logging.FromContext(ctx).Debug("UpdateHostToChannelMap: Acquired mutex lock. Updating internal map")
 
 	chanList, err := channelwatcher.ListAllChannels(ctx, r.client, channel.ShouldReconcile)
 	if err != nil {
@@ -197,11 +197,11 @@ func (r *Receiver) UpdateHostToChannelMap(ctx context.Context) error {
 
 	hostToChanMap, err := provisioners.NewHostNameToChannelRefMap(chanList)
 	if err != nil {
-		logging.FromContext(ctx).Info("UpdateHostToChannelMap: Error occured when creating the new hostToChannel map.", zap.Error(err))
+		logging.FromContext(ctx).Error("UpdateHostToChannelMap: Error occurred when creating the new hostToChannel map.", zap.Error(err))
 		return err
 	}
 	r.setHostToChannelMap(hostToChanMap)
-	logging.FromContext(ctx).Info("UpdateHostToChannelMap: Update successful. Releasing mutex lock")
+	logging.FromContext(ctx).Info("UpdateHostToChannelMap: Update successful.")
 	return nil
 }
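The receiver changes above complete a pattern that runs through this series: the dispatcher's reconcile loop invokes extra ReconcileHandler callbacks, and one of them rebuilds a mutex-guarded host-to-channel map that getChannelReferenceFromHost consults for each request's Host header. Below is a minimal, self-contained sketch of that pattern only, not the knative implementation; ChannelRef is a stand-in for provisioners.ChannelReference, and the handler signature drops the controller-runtime reconcile.Request argument to keep the sketch dependency-free.

package main

import (
	"context"
	"fmt"
	"sync"
)

// ChannelRef is a stand-in for the real ChannelReference type.
type ChannelRef struct {
	Namespace, Name string
}

// ReconcileHandler mirrors the callback idea introduced in the patch,
// without the controller-runtime request parameter.
type ReconcileHandler func(ctx context.Context) error

// hostResolver keeps the host-to-channel map behind a mutex, as the receiver does.
type hostResolver struct {
	mu    sync.RWMutex
	hosts map[string]ChannelRef
}

// set swaps in a freshly built map under the write lock.
func (r *hostResolver) set(m map[string]ChannelRef) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.hosts = m
}

// resolve maps a request's Host header to a channel, failing for unknown hosts.
func (r *hostResolver) resolve(host string) (ChannelRef, error) {
	r.mu.RLock()
	defer r.mu.RUnlock()
	if ref, ok := r.hosts[host]; ok {
		return ref, nil
	}
	return ChannelRef{}, fmt.Errorf("invalid host %q: not found in any watched channel", host)
}

// reconcile runs the dispatcher's own work (elided here) and then every additional
// handler, which is how the receiver's map gets refreshed whenever a Channel changes.
func reconcile(ctx context.Context, handlers []ReconcileHandler) error {
	for _, h := range handlers {
		if err := h(ctx); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	res := &hostResolver{hosts: map[string]ChannelRef{}}
	refresh := func(ctx context.Context) error {
		// The real dispatcher would list Channels and derive their hostnames here.
		res.set(map[string]ChannelRef{"demo.default.svc.cluster.local": {Namespace: "default", Name: "demo"}})
		return nil
	}
	_ = reconcile(context.Background(), []ReconcileHandler{refresh})
	ref, err := res.resolve("demo.default.svc.cluster.local")
	fmt.Println(ref, err)
}

Taking the write lock only inside set keeps per-request lookups cheap, while the reconcile loop can still swap the whole map in one step.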