diff --git a/contrib/kafka/cmd/channel_controller/main.go b/contrib/kafka/cmd/channel_controller/main.go new file mode 100644 index 00000000000..95212a0b8c8 --- /dev/null +++ b/contrib/kafka/cmd/channel_controller/main.go @@ -0,0 +1,191 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "flag" + "github.com/knative/eventing/contrib/kafka/pkg/utils" + "log" + + clientset "github.com/knative/eventing/contrib/kafka/pkg/client/clientset/versioned" + eventingScheme "github.com/knative/eventing/contrib/kafka/pkg/client/clientset/versioned/scheme" + informers "github.com/knative/eventing/contrib/kafka/pkg/client/informers/externalversions" + kafkachannel "github.com/knative/eventing/contrib/kafka/pkg/reconciler/controller" + "github.com/knative/eventing/pkg/logconfig" + "github.com/knative/eventing/pkg/logging" + "github.com/knative/eventing/pkg/reconciler" + "github.com/knative/pkg/configmap" + kncontroller "github.com/knative/pkg/controller" + "github.com/knative/pkg/signals" + "github.com/knative/pkg/system" + "go.uber.org/zap" + kubeinformers "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" +) + +const ( + dispatcherDeploymentName = "kafka-ch-dispatcher" + dispatcherServiceName = "kafka-ch-dispatcher" +) + +var ( + hardcodedLoggingConfig = flag.Bool("hardCodedLoggingConfig", false, "If true, use the hard coded logging config. 
It is intended to be used only when debugging outside a Kubernetes cluster.") + masterURL = flag.String("master", "", "The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster.") + kubeconfig = flag.String("kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.") +) + +func main() { + flag.Parse() + logger, atomicLevel := setupLogger() + defer logger.Sync() + + // set up signals so we handle the first shutdown signal gracefully + stopCh := signals.SetupSignalHandler() + + cfg, err := clientcmd.BuildConfigFromFlags(*masterURL, *kubeconfig) + if err != nil { + logger.Fatalw("Error building kubeconfig", zap.Error(err)) + } + + // TODO the underlying config map needs to be watched and the config should be reloaded if there is a change. + kafkaConfig, err := utils.GetKafkaConfig("/etc/config-kafka") + if err != nil { + logger.Fatalw("Error loading kafka config", zap.Error(err)) + } + + logger = logger.With(zap.String("controller/impl", "pkg")) + logger.Info("Starting the Kafka controller") + + systemNS := system.Namespace() + + const numControllers = 1 + cfg.QPS = numControllers * rest.DefaultQPS + cfg.Burst = numControllers * rest.DefaultBurst + opt := reconciler.NewOptionsOrDie(cfg, logger, stopCh) + // Setting up our own eventingClientSet as we need the messaging API introduced with kafka. + eventingClientSet := clientset.NewForConfigOrDie(cfg) + + kubeInformerFactory := kubeinformers.NewSharedInformerFactory(opt.KubeClientSet, opt.ResyncPeriod) + eventingInformerFactory := informers.NewSharedInformerFactory(eventingClientSet, opt.ResyncPeriod) + + // Messaging + kafkaChannelInformer := eventingInformerFactory.Messaging().V1alpha1().KafkaChannels() + + // Kube + serviceInformer := kubeInformerFactory.Core().V1().Services() + endpointsInformer := kubeInformerFactory.Core().V1().Endpoints() + deploymentInformer := kubeInformerFactory.Apps().V1().Deployments() + + // Adding the scheme. 
+ eventingScheme.AddToScheme(scheme.Scheme) + + // Build all of our controllers, with the clients constructed above. + // Add new controllers to this array. + // You also need to modify numControllers above to match this. + controllers := [...]*kncontroller.Impl{ + kafkachannel.NewController( + opt, + eventingClientSet, + kafkaConfig, + systemNS, + dispatcherDeploymentName, + dispatcherServiceName, + kafkaChannelInformer, + deploymentInformer, + serviceInformer, + endpointsInformer, + ), + } + // This line asserts at compile time that the length of controllers is equal to numControllers. + // It is based on https://go101.org/article/tips.html#assert-at-compile-time, which notes that + // var _ [N-M]int + // asserts at compile time that N >= M, which we can use to establish equality of N and M: + // (N >= M) && (M >= N) => (N == M) + var _ [numControllers - len(controllers)][len(controllers) - numControllers]int + + // Watch the logging config map and dynamically update logging levels. + opt.ConfigMapWatcher.Watch(logconfig.ConfigMapName(), logging.UpdateLevelFromConfigMap(logger, atomicLevel, logconfig.Controller)) + // TODO: Watch the observability config map and dynamically update metrics exporter. + //opt.ConfigMapWatcher.Watch(metrics.ObservabilityConfigName, metrics.UpdateExporterFromConfigMap(component, logger)) + if err := opt.ConfigMapWatcher.Start(stopCh); err != nil { + logger.Fatalw("failed to start configuration manager", zap.Error(err)) + } + + // Start all of the informers and wait for them to sync. + logger.Info("Starting informers.") + if err := kncontroller.StartInformers( + stopCh, + // Messaging + kafkaChannelInformer.Informer(), + + // Kube + serviceInformer.Informer(), + deploymentInformer.Informer(), + endpointsInformer.Informer(), + ); err != nil { + logger.Fatalf("Failed to start informers: %v", err) + } + + logger.Info("Starting controllers.") + kncontroller.StartAll(stopCh, controllers[:]...) 
+} + +func setupLogger() (*zap.SugaredLogger, zap.AtomicLevel) { + // Set up our logger. + loggingConfigMap := getLoggingConfigOrDie() + loggingConfig, err := logging.NewConfigFromMap(loggingConfigMap) + if err != nil { + log.Fatalf("Error parsing logging configuration: %v", err) + } + return logging.NewLoggerFromConfig(loggingConfig, logconfig.Controller) +} + +func getLoggingConfigOrDie() map[string]string { + if hardcodedLoggingConfig != nil && *hardcodedLoggingConfig { + return map[string]string{ + "loglevel.controller": "info", + "zap-logger-config": ` + { + "level": "info", + "development": false, + "outputPaths": ["stdout"], + "errorOutputPaths": ["stderr"], + "encoding": "json", + "encoderConfig": { + "timeKey": "ts", + "levelKey": "level", + "nameKey": "logger", + "callerKey": "caller", + "messageKey": "msg", + "stacktraceKey": "stacktrace", + "lineEnding": "", + "levelEncoder": "", + "timeEncoder": "iso8601", + "durationEncoder": "", + "callerEncoder": "" + }}`, + } + } else { + cm, err := configmap.Load("/etc/config-logging") + if err != nil { + log.Fatalf("Error loading logging configuration: %v", err) + } + return cm + } +} diff --git a/contrib/kafka/cmd/channel_dispatcher/main.go b/contrib/kafka/cmd/channel_dispatcher/main.go new file mode 100644 index 00000000000..1c9c6f0b9f9 --- /dev/null +++ b/contrib/kafka/cmd/channel_dispatcher/main.go @@ -0,0 +1,179 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "flag" + "github.com/knative/eventing/contrib/kafka/pkg/utils" + "log" + + clientset "github.com/knative/eventing/contrib/kafka/pkg/client/clientset/versioned" + eventingScheme "github.com/knative/eventing/contrib/kafka/pkg/client/clientset/versioned/scheme" + informers "github.com/knative/eventing/contrib/kafka/pkg/client/informers/externalversions" + "github.com/knative/eventing/contrib/kafka/pkg/dispatcher" + kafkachannel "github.com/knative/eventing/contrib/kafka/pkg/reconciler/dispatcher" + "github.com/knative/eventing/pkg/logconfig" + "github.com/knative/eventing/pkg/logging" + "github.com/knative/eventing/pkg/reconciler" + "github.com/knative/pkg/configmap" + kncontroller "github.com/knative/pkg/controller" + "github.com/knative/pkg/signals" + "go.uber.org/zap" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" +) + +var ( + hardcodedLoggingConfig = flag.Bool("hardCodedLoggingConfig", false, "If true, use the hard coded logging config. It is intended to be used only when debugging outside a Kubernetes cluster.") + masterURL = flag.String("master", "", "The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster.") + kubeconfig = flag.String("kubeconfig", "", "Path to a kubeconfig. 
Only required if out-of-cluster.") +) + +func main() { + flag.Parse() + logger, atomicLevel := setupLogger() + defer logger.Sync() + + // set up signals so we handle the first shutdown signal gracefully + stopCh := signals.SetupSignalHandler() + + cfg, err := clientcmd.BuildConfigFromFlags(*masterURL, *kubeconfig) + if err != nil { + logger.Fatalw("Error building kubeconfig", zap.Error(err)) + } + + kafkaConfig, err := utils.GetKafkaConfig("/etc/config-kafka") + if err != nil { + logger.Fatalw("Error loading kafka config", zap.Error(err)) + } + + args := &dispatcher.KafkaDispatcherArgs{ + ClientID: "kafka-ch-dispatcher", + Brokers: kafkaConfig.Brokers, + ConsumerMode: kafkaConfig.ConsumerMode, + TopicFunc: utils.TopicName, + Logger: logger.Desugar(), + } + kafkaDispatcher, err := dispatcher.NewDispatcher(args) + if err != nil { + logger.Fatalw("Unable to create kafka dispatcher", zap.Error(err)) + } + + logger = logger.With(zap.String("controller/impl", "pkg")) + logger.Info("Starting the Kafka dispatcher") + + const numControllers = 1 + cfg.QPS = numControllers * rest.DefaultQPS + cfg.Burst = numControllers * rest.DefaultBurst + opt := reconciler.NewOptionsOrDie(cfg, logger, stopCh) + // Setting up our own eventingClientSet as we need the messaging API introduced with kafka. + eventingClientSet := clientset.NewForConfigOrDie(cfg) + eventingInformerFactory := informers.NewSharedInformerFactory(eventingClientSet, opt.ResyncPeriod) + + // Messaging + kafkaChannelInformer := eventingInformerFactory.Messaging().V1alpha1().KafkaChannels() + + // Adding the scheme. + eventingScheme.AddToScheme(scheme.Scheme) + + // Build all of our controllers, with the clients constructed above. + // Add new controllers to this array. + // You also need to modify numControllers above to match this. 
+ controllers := [...]*kncontroller.Impl{ + kafkachannel.NewController( + opt, + eventingClientSet, + kafkaDispatcher, + kafkaChannelInformer, + ), + } + // This line asserts at compile time that the length of controllers is equal to numControllers. + // It is based on https://go101.org/article/tips.html#assert-at-compile-time, which notes that + // var _ [N-M]int + // asserts at compile time that N >= M, which we can use to establish equality of N and M: + // (N >= M) && (M >= N) => (N == M) + var _ [numControllers - len(controllers)][len(controllers) - numControllers]int + + // Watch the logging config map and dynamically update logging levels. + opt.ConfigMapWatcher.Watch(logconfig.ConfigMapName(), logging.UpdateLevelFromConfigMap(logger, atomicLevel, logconfig.Controller)) + // TODO: Watch the observability config map and dynamically update metrics exporter. + //opt.ConfigMapWatcher.Watch(metrics.ObservabilityConfigName, metrics.UpdateExporterFromConfigMap(component, logger)) + if err := opt.ConfigMapWatcher.Start(stopCh); err != nil { + logger.Fatalw("failed to start configuration manager", zap.Error(err)) + } + + // Start all of the informers and wait for them to sync. + logger.Info("Starting informers.") + if err := kncontroller.StartInformers( + stopCh, + // Messaging + kafkaChannelInformer.Informer(), + ); err != nil { + logger.Fatalf("Failed to start informers: %v", err) + } + + logger.Info("Starting dispatcher.") + go kafkaDispatcher.Start(stopCh) + + logger.Info("Starting controllers.") + kncontroller.StartAll(stopCh, controllers[:]...) +} + +func setupLogger() (*zap.SugaredLogger, zap.AtomicLevel) { + // Set up our logger. 
+ loggingConfigMap := getLoggingConfigOrDie() + loggingConfig, err := logging.NewConfigFromMap(loggingConfigMap) + if err != nil { + log.Fatalf("Error parsing logging configuration: %v", err) + } + return logging.NewLoggerFromConfig(loggingConfig, logconfig.Controller) +} + +func getLoggingConfigOrDie() map[string]string { + if hardcodedLoggingConfig != nil && *hardcodedLoggingConfig { + return map[string]string{ + "loglevel.controller": "info", + "zap-logger-config": ` + { + "level": "info", + "development": false, + "outputPaths": ["stdout"], + "errorOutputPaths": ["stderr"], + "encoding": "json", + "encoderConfig": { + "timeKey": "ts", + "levelKey": "level", + "nameKey": "logger", + "callerKey": "caller", + "messageKey": "msg", + "stacktraceKey": "stacktrace", + "lineEnding": "", + "levelEncoder": "", + "timeEncoder": "iso8601", + "durationEncoder": "", + "callerEncoder": "" + }}`, + } + } else { + cm, err := configmap.Load("/etc/config-logging") + if err != nil { + log.Fatalf("Error loading logging configuration: %v", err) + } + return cm + } +} diff --git a/contrib/kafka/cmd/controller/main.go b/contrib/kafka/cmd/controller/main.go index 96d500834e7..7a1dd26cf41 100644 --- a/contrib/kafka/cmd/controller/main.go +++ b/contrib/kafka/cmd/controller/main.go @@ -1,7 +1,24 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + package main import ( "flag" + "github.com/knative/eventing/contrib/kafka/pkg/utils" "os" // Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters). @@ -24,7 +41,7 @@ import ( type SchemeFunc func(*runtime.Scheme) error // ProvideFunc adds a controller to a Manager. -type ProvideFunc func(mgr manager.Manager, config *provisionerController.KafkaProvisionerConfig, logger *zap.Logger) (controller.Controller, error) +type ProvideFunc func(mgr manager.Manager, config *utils.KafkaConfig, logger *zap.Logger) (controller.Controller, error) func main() { os.Exit(_main()) @@ -60,7 +77,7 @@ func _main() int { } // TODO the underlying config map needs to be watched and the config should be reloaded if there is a change. - provisionerConfig, err := provisionerController.GetProvisionerConfig("/etc/config-provisioner") + provisionerConfig, err := utils.GetKafkaConfig("/etc/config-provisioner") if err != nil { logger.Error(err, "unable to run controller manager") diff --git a/contrib/kafka/cmd/dispatcher/main.go b/contrib/kafka/cmd/dispatcher/main.go index b9d268d48d9..d1880527276 100644 --- a/contrib/kafka/cmd/dispatcher/main.go +++ b/contrib/kafka/cmd/dispatcher/main.go @@ -18,13 +18,15 @@ package main import ( "flag" + "fmt" "log" "github.com/knative/eventing/contrib/kafka/pkg/controller" - provisionerController "github.com/knative/eventing/contrib/kafka/pkg/controller" "github.com/knative/eventing/contrib/kafka/pkg/dispatcher" + "github.com/knative/eventing/contrib/kafka/pkg/utils" "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" "github.com/knative/eventing/pkg/channelwatcher" + topicUtils "github.com/knative/eventing/pkg/provisioners/utils" "github.com/knative/eventing/pkg/tracing" "github.com/knative/pkg/configmap" "github.com/knative/pkg/signals" @@ -41,7 +43,7 @@ func main() { if err != nil { log.Fatalf("unable to create logger: %v", err) } - provisionerConfig, err := 
provisionerController.GetProvisionerConfig("/etc/config-provisioner") + provisionerConfig, err := utils.GetKafkaConfig("/etc/config-provisioner") if err != nil { logger.Fatal("unable to load provisioner config", zap.Error(err)) } @@ -51,7 +53,14 @@ func main() { logger.Fatal("unable to create manager.", zap.Error(err)) } - kafkaDispatcher, err := dispatcher.NewDispatcher(provisionerConfig.Brokers, provisionerConfig.ConsumerMode, logger) + args := &dispatcher.KafkaDispatcherArgs{ + ClientID: fmt.Sprintf("%s-dispatcher", controller.Name), + Brokers: provisionerConfig.Brokers, + ConsumerMode: provisionerConfig.ConsumerMode, + TopicFunc: topicUtils.TopicName, + Logger: logger, + } + kafkaDispatcher, err := dispatcher.NewDispatcher(args) if err != nil { logger.Fatal("unable to create kafka dispatcher.", zap.Error(err)) } diff --git a/contrib/kafka/config/200-controller-clusterrole.yaml b/contrib/kafka/config/200-controller-clusterrole.yaml new file mode 100644 index 00000000000..90c1cee2620 --- /dev/null +++ b/contrib/kafka/config/200-controller-clusterrole.yaml @@ -0,0 +1,93 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: kafka-ch-controller +rules: + - apiGroups: + - messaging.knative.dev + resources: + - kafkachannels + - kafkachannels/status + verbs: + - get + - list + - watch + - update + - patch + - apiGroups: + - messaging.knative.dev + resources: + - kafkachannels/finalizers + verbs: + - update + - apiGroups: + - "" # Core API group. + resources: + - services + - configmaps + verbs: + - get + - list + - watch + - create + - apiGroups: + - "" # Core API group. + resources: + - services + verbs: + - update + - apiGroups: + - "" # Core API Group. + resources: + - configmaps + resourceNames: + - kafka-ch-dispatcher + verbs: + - update + - apiGroups: + - "" # Core API Group. + resources: + - events + verbs: + - create + - patch + - update + - apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - apiGroups: + - "" # Core API group. + resources: + - endpoints + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - deployments + - deployments/status + verbs: + - get + - list + - watch diff --git a/contrib/kafka/config/200-dispatcher-clusterrole.yaml b/contrib/kafka/config/200-dispatcher-clusterrole.yaml new file mode 100644 index 00000000000..b3766ef80d2 --- /dev/null +++ b/contrib/kafka/config/200-dispatcher-clusterrole.yaml @@ -0,0 +1,36 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: kafka-ch-dispatcher +rules: + - apiGroups: + - messaging.knative.dev + resources: + - kafkachannels + - kafkachannels/status + verbs: + - get + - list + - watch + - apiGroups: + - "" # Core API group. + resources: + - configmaps + verbs: + - get + - list + - watch diff --git a/contrib/kafka/config/200-dispatcher-service.yaml b/contrib/kafka/config/200-dispatcher-service.yaml new file mode 100644 index 00000000000..c204c83d422 --- /dev/null +++ b/contrib/kafka/config/200-dispatcher-service.yaml @@ -0,0 +1,30 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: Service +metadata: + name: kafka-ch-dispatcher + namespace: knative-eventing + labels: + messaging.knative.dev/channel: kafka-channel + messaging.knative.dev/role: dispatcher +spec: + selector: + messaging.knative.dev/channel: kafka-channel + messaging.knative.dev/role: dispatcher + ports: + - port: 80 + protocol: TCP + targetPort: 8080 diff --git a/contrib/kafka/config/200-serviceaccount.yaml b/contrib/kafka/config/200-serviceaccount.yaml new file mode 100644 index 00000000000..8daa2857377 --- /dev/null +++ b/contrib/kafka/config/200-serviceaccount.yaml @@ -0,0 +1,26 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kafka-ch-controller + namespace: knative-eventing + +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kafka-ch-dispatcher + namespace: knative-eventing diff --git a/contrib/kafka/config/201-clusterrolebinding.yaml b/contrib/kafka/config/201-clusterrolebinding.yaml new file mode 100644 index 00000000000..99de053e752 --- /dev/null +++ b/contrib/kafka/config/201-clusterrolebinding.yaml @@ -0,0 +1,43 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: kafka-ch-controller +subjects: + - kind: ServiceAccount + name: kafka-ch-controller + namespace: knative-eventing +roleRef: + kind: ClusterRole + name: kafka-ch-controller + apiGroup: rbac.authorization.k8s.io + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: kafka-ch-dispatcher + namespace: knative-eventing +subjects: + - kind: ServiceAccount + name: kafka-ch-dispatcher + namespace: knative-eventing +roleRef: + kind: ClusterRole + name: kafka-ch-dispatcher + apiGroup: rbac.authorization.k8s.io + diff --git a/contrib/kafka/config/300-kafka-channel.yaml b/contrib/kafka/config/300-kafka-channel.yaml new file mode 100644 index 00000000000..90d9aec28b9 --- /dev/null +++ b/contrib/kafka/config/300-kafka-channel.yaml @@ -0,0 +1,102 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: kafkachannels.messaging.knative.dev + labels: + knative.dev/crd-install: "true" + messaging.knative.dev/subscribable: "true" +spec: + group: messaging.knative.dev + version: v1alpha1 + names: + kind: KafkaChannel + plural: kafkachannels + singular: kafkachannel + categories: + - all + - knative + - messaging + shortNames: + - kc + scope: Namespaced + subresources: + status: {} + additionalPrinterColumns: + - name: Ready + type: string + JSONPath: ".status.conditions[?(@.type==\"Ready\")].status" + - name: Reason + type: string + JSONPath: ".status.conditions[?(@.type==\"Ready\")].reason" + - name: Hostname + type: string + JSONPath: .status.address.hostname + - name: Age + type: date + JSONPath: .metadata.creationTimestamp + validation: + openAPIV3Schema: + properties: + spec: + properties: + numPartitions: + format: int32 + type: integer + description: "Number of partitions of a Kafka topic." + replicationFactor: + format: int16 + type: integer + description: "Replication factor of a Kafka topic." + subscribable: + type: object + properties: + subscribers: + type: array + description: "The list of subscribers that have expressed interest in receiving events from this channel." 
+ items: + required: + - uid + properties: + ref: + type: object + required: + - namespace + - name + - uid + properties: + apiVersion: + type: string + kind: + type: string + name: + type: string + minLength: 1 + namespace: + type: string + minLength: 1 + uid: + type: string + minLength: 1 + uid: + type: string + minLength: 1 + subscriberURI: + type: string + minLength: 1 + replyURI: + type: string + minLength: 1 diff --git a/contrib/kafka/config/400-kafka-config.yaml b/contrib/kafka/config/400-kafka-config.yaml new file mode 100644 index 00000000000..535ff48a812 --- /dev/null +++ b/contrib/kafka/config/400-kafka-config.yaml @@ -0,0 +1,28 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-kafka + namespace: knative-eventing +data: + # Broker URL. Replace this with the URLs for your kafka cluster, + # which is in the format of my-cluster-kafka-bootstrap.my-kafka-namespace:9092. + bootstrap_servers: REPLACE_WITH_CLUSTER_URL + + # Consumer mode to dispatch events from different partitions in parallel. + # By default(multiplex), partitions are multiplexed with a single go channel. + # `multiplex` and `partitions` are valid values. 
+ ## consumer_mode: partitions diff --git a/contrib/kafka/config/500-controller.yaml b/contrib/kafka/config/500-controller.yaml new file mode 100644 index 00000000000..24ab9dc8606 --- /dev/null +++ b/contrib/kafka/config/500-controller.yaml @@ -0,0 +1,52 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kafka-ch-controller + namespace: knative-eventing +spec: + replicas: 1 + selector: + matchLabels: &labels + messaging.knative.dev/channel: kafka-channel + messaging.knative.dev/role: controller + template: + metadata: + labels: *labels + spec: + serviceAccountName: kafka-ch-controller + containers: + - name: controller + image: github.com/knative/eventing/contrib/kafka/cmd/channel_controller + env: + - name: CONFIG_LOGGING_NAME + value: config-logging + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumeMounts: + - name: config-logging + mountPath: /etc/config-logging + - name: config-kafka + mountPath: /etc/config-kafka + volumes: + - name: config-logging + configMap: + name: config-logging + - name: config-kafka + configMap: + name: config-kafka diff --git a/contrib/kafka/config/500-dispatcher.yaml b/contrib/kafka/config/500-dispatcher.yaml new file mode 100644 index 00000000000..fbd75314e83 --- /dev/null +++ b/contrib/kafka/config/500-dispatcher.yaml @@ -0,0 +1,52 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the 
Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kafka-ch-dispatcher + namespace: knative-eventing +spec: + replicas: 1 + selector: + matchLabels: &labels + messaging.knative.dev/channel: kafka-channel + messaging.knative.dev/role: dispatcher + template: + metadata: + labels: *labels + spec: + serviceAccountName: kafka-ch-dispatcher + containers: + - name: dispatcher + image: github.com/knative/eventing/contrib/kafka/cmd/channel_dispatcher + env: + - name: CONFIG_LOGGING_NAME + value: config-logging + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumeMounts: + - name: config-logging + mountPath: /etc/config-logging + - name: config-kafka + mountPath: /etc/config-kafka + volumes: + - name: config-logging + configMap: + name: config-logging + - name: config-kafka + configMap: + name: config-kafka diff --git a/contrib/kafka/config/README.md b/contrib/kafka/config/provisioner/README.md similarity index 93% rename from contrib/kafka/config/README.md rename to contrib/kafka/config/provisioner/README.md index 2664fb684ee..f67ea4c1e3b 100644 --- a/contrib/kafka/config/README.md +++ b/contrib/kafka/config/provisioner/README.md @@ -2,7 +2,7 @@ Deployment steps: -1. Setup [Knative Eventing](../../../DEVELOPMENT.md) +1. Setup [Knative Eventing](../../../../DEVELOPMENT.md) 1. If not done already, install an Apache Kafka cluster! 
- For Kubernetes a simple installation is done using the @@ -16,7 +16,7 @@ Deployment steps: 1. Now that Apache Kafka is installed, you need to configure the `bootstrap_servers` value in the `kafka-channel-controller-config` ConfigMap, - located inside the `contrib/kafka/config/kafka.yaml` file: + located inside the `contrib/kafka/config/provisioner/kafka.yaml` file: ```yaml ... @@ -39,7 +39,7 @@ Deployment steps: 1. Apply the 'Kafka' ClusterChannelProvisioner, Controller, and Dispatcher: ``` - ko apply -f contrib/kafka/config/kafka.yaml + ko apply -f contrib/kafka/config/provisioner/kafka.yaml ``` 1. Create Channels that reference the 'kafka' ClusterChannelProvisioner. diff --git a/contrib/kafka/config/kafka.yaml b/contrib/kafka/config/provisioner/kafka.yaml similarity index 100% rename from contrib/kafka/config/kafka.yaml rename to contrib/kafka/config/provisioner/kafka.yaml diff --git a/contrib/kafka/pkg/apis/messaging/register.go b/contrib/kafka/pkg/apis/messaging/register.go new file mode 100644 index 00000000000..8f678adcd23 --- /dev/null +++ b/contrib/kafka/pkg/apis/messaging/register.go @@ -0,0 +1,21 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package messaging + +const ( + GroupName = "messaging.knative.dev" +) diff --git a/contrib/kafka/pkg/apis/messaging/v1alpha1/doc.go b/contrib/kafka/pkg/apis/messaging/v1alpha1/doc.go new file mode 100644 index 00000000000..64e1d2ec055 --- /dev/null +++ b/contrib/kafka/pkg/apis/messaging/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1alpha1 is the v1alpha1 version of the API. +// +k8s:deepcopy-gen=package +// +groupName=messaging.knative.dev +package v1alpha1 diff --git a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_defaults.go b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_defaults.go new file mode 100644 index 00000000000..e563a7cf759 --- /dev/null +++ b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_defaults.go @@ -0,0 +1,35 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + "context" + "github.com/knative/eventing/contrib/kafka/pkg/utils" +) + +func (c *KafkaChannel) SetDefaults(ctx context.Context) { + c.Spec.SetDefaults(ctx) +} + +func (cs *KafkaChannelSpec) SetDefaults(ctx context.Context) { + if cs.NumPartitions == 0 { + cs.NumPartitions = utils.DefaultNumPartitions + } + if cs.ReplicationFactor == 0 { + cs.ReplicationFactor = utils.DefaultReplicationFactor + } +} diff --git a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_defaults_test.go b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_defaults_test.go new file mode 100644 index 00000000000..b22e88c9dae --- /dev/null +++ b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_defaults_test.go @@ -0,0 +1,78 @@ +/* +Copyright 2019 The Knative Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + "context" + "github.com/knative/eventing/contrib/kafka/pkg/utils" + "testing" + + "github.com/google/go-cmp/cmp" +) + +const ( + testNumPartitions = 10 + testReplicationFactor = 5 +) + +func TestKafkaChannelDefaults(t *testing.T) { + testCases := map[string]struct { + initial KafkaChannel + expected KafkaChannel + }{ + "nil spec": { + initial: KafkaChannel{}, + expected: KafkaChannel{ + Spec: KafkaChannelSpec{ + NumPartitions: utils.DefaultNumPartitions, + ReplicationFactor: utils.DefaultReplicationFactor, + }, + }, + }, + "numPartitions not set": { + initial: KafkaChannel{ + Spec: KafkaChannelSpec{ + ReplicationFactor: testReplicationFactor, + }, + }, + expected: KafkaChannel{ + Spec: KafkaChannelSpec{ + NumPartitions: utils.DefaultNumPartitions, + ReplicationFactor: testReplicationFactor, + }, + }, + }, + "replicationFactor not set": { + initial: KafkaChannel{ + Spec: KafkaChannelSpec{ + NumPartitions: testNumPartitions, + }, + }, + expected: KafkaChannel{ + Spec: KafkaChannelSpec{ + NumPartitions: testNumPartitions, + ReplicationFactor: utils.DefaultReplicationFactor, + }, + }, + }, + } + for n, tc := range testCases { + t.Run(n, func(t *testing.T) { + tc.initial.SetDefaults(context.TODO()) + if diff := cmp.Diff(tc.expected, tc.initial); diff != "" { + t.Fatalf("Unexpected defaults (-want, +got): %s", diff) + } + }) + } +} diff --git a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_lifecycle.go b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_lifecycle.go new file mode 100644 index 00000000000..686544c4a3e --- /dev/null +++ b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_lifecycle.go @@ -0,0 +1,135 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" +) + +var kc = duckv1alpha1.NewLivingConditionSet( + KafkaChannelConditionTopicReady, + KafkaChannelConditionDispatcherReady, + KafkaChannelConditionServiceReady, + KafkaChannelConditionEndpointsReady, + KafkaChannelConditionAddressable, + KafkaChannelConditionChannelServiceReady) + +const ( + // KafkaChannelConditionReady has status True when all subconditions below have been set to True. + KafkaChannelConditionReady = duckv1alpha1.ConditionReady + + // KafkaChannelConditionDispatcherReady has status True when a Dispatcher deployment is ready + // Keyed off appsv1.DeploymentAvailable, which means minimum available replicas required are up + // and running for at least minReadySeconds. + KafkaChannelConditionDispatcherReady duckv1alpha1.ConditionType = "DispatcherReady" + + // KafkaChannelConditionServiceReady has status True when a k8s Service is ready. This + // basically just means it exists because there's no meaningful status in Service. See Endpoints + // below. + KafkaChannelConditionServiceReady duckv1alpha1.ConditionType = "ServiceReady" + + // KafkaChannelConditionEndpointsReady has status True when a k8s Service Endpoints are backed + // by at least one endpoint. + KafkaChannelConditionEndpointsReady duckv1alpha1.ConditionType = "EndpointsReady" + + // KafkaChannelConditionAddressable has status true when this KafkaChannel meets + // the Addressable contract and has a non-empty hostname. 
+ KafkaChannelConditionAddressable duckv1alpha1.ConditionType = "Addressable" + + // KafkaChannelConditionServiceReady has status True when a k8s Service representing the channel is ready. + // Because this uses ExternalName, there are no endpoints to check. + KafkaChannelConditionChannelServiceReady duckv1alpha1.ConditionType = "ChannelServiceReady" + + // KafkaChannelConditionTopicReady has status True when the Kafka topic to use by the channel exists. + KafkaChannelConditionTopicReady duckv1alpha1.ConditionType = "TopicReady" +) + +// GetCondition returns the condition currently associated with the given type, or nil. +func (cs *KafkaChannelStatus) GetCondition(t duckv1alpha1.ConditionType) *duckv1alpha1.Condition { + return kc.Manage(cs).GetCondition(t) +} + +// IsReady returns true if the resource is ready overall. +func (cs *KafkaChannelStatus) IsReady() bool { + return kc.Manage(cs).IsHappy() +} + +// InitializeConditions sets relevant unset conditions to Unknown state. +func (cs *KafkaChannelStatus) InitializeConditions() { + kc.Manage(cs).InitializeConditions() +} + +// TODO: Use the new beta duck types. +func (cs *KafkaChannelStatus) SetAddress(hostname string) { + cs.Address.Hostname = hostname + if hostname != "" { + kc.Manage(cs).MarkTrue(KafkaChannelConditionAddressable) + } else { + kc.Manage(cs).MarkFalse(KafkaChannelConditionAddressable, "EmptyHostname", "hostname is the empty string") + } +} + +func (cs *KafkaChannelStatus) MarkDispatcherFailed(reason, messageFormat string, messageA ...interface{}) { + kc.Manage(cs).MarkFalse(KafkaChannelConditionDispatcherReady, reason, messageFormat, messageA...) +} + +// TODO: Unify this with the ones from Eventing. Say: Broker, Trigger. 
+func (cs *KafkaChannelStatus) PropagateDispatcherStatus(ds *appsv1.DeploymentStatus) { + for _, cond := range ds.Conditions { + if cond.Type == appsv1.DeploymentAvailable { + if cond.Status != corev1.ConditionTrue { + cs.MarkDispatcherFailed("DispatcherNotReady", "Dispatcher Deployment is not ready: %s : %s", cond.Reason, cond.Message) + } else { + kc.Manage(cs).MarkTrue(KafkaChannelConditionDispatcherReady) + } + } + } +} + +func (cs *KafkaChannelStatus) MarkServiceFailed(reason, messageFormat string, messageA ...interface{}) { + kc.Manage(cs).MarkFalse(KafkaChannelConditionServiceReady, reason, messageFormat, messageA...) +} + +func (cs *KafkaChannelStatus) MarkServiceTrue() { + kc.Manage(cs).MarkTrue(KafkaChannelConditionServiceReady) +} + +func (cs *KafkaChannelStatus) MarkChannelServiceFailed(reason, messageFormat string, messageA ...interface{}) { + kc.Manage(cs).MarkFalse(KafkaChannelConditionChannelServiceReady, reason, messageFormat, messageA...) +} + +func (cs *KafkaChannelStatus) MarkChannelServiceTrue() { + kc.Manage(cs).MarkTrue(KafkaChannelConditionChannelServiceReady) +} + +func (cs *KafkaChannelStatus) MarkEndpointsFailed(reason, messageFormat string, messageA ...interface{}) { + kc.Manage(cs).MarkFalse(KafkaChannelConditionEndpointsReady, reason, messageFormat, messageA...) +} + +func (cs *KafkaChannelStatus) MarkEndpointsTrue() { + kc.Manage(cs).MarkTrue(KafkaChannelConditionEndpointsReady) +} + +func (cs *KafkaChannelStatus) MarkTopicTrue() { + kc.Manage(cs).MarkTrue(KafkaChannelConditionTopicReady) +} + +func (cs *KafkaChannelStatus) MarkTopicFailed(reason, messageFormat string, messageA ...interface{}) { + kc.Manage(cs).MarkFalse(KafkaChannelConditionTopicReady, reason, messageFormat, messageA...) 
+} diff --git a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_lifecycle_test.go b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_lifecycle_test.go new file mode 100644 index 00000000000..d6a86f6582e --- /dev/null +++ b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_lifecycle_test.go @@ -0,0 +1,404 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" +) + +var condReady = duckv1alpha1.Condition{ + Type: KafkaChannelConditionReady, + Status: corev1.ConditionTrue, +} + +var condDispatcherReady = duckv1alpha1.Condition{ + Type: KafkaChannelConditionDispatcherReady, + Status: corev1.ConditionTrue, +} + +var condDispatcherNotReady = duckv1alpha1.Condition{ + Type: KafkaChannelConditionDispatcherReady, + Status: corev1.ConditionFalse, +} + +var condDispatcherServiceReady = duckv1alpha1.Condition{ + Type: KafkaChannelConditionServiceReady, + Status: corev1.ConditionTrue, +} + +var condDispatcherEndpointsReady = duckv1alpha1.Condition{ + Type: KafkaChannelConditionEndpointsReady, + Status: corev1.ConditionTrue, +} + +var condTopicReady = duckv1alpha1.Condition{ + Type: KafkaChannelConditionTopicReady, + Status: corev1.ConditionTrue, +} + +var condDispatcherAddressable = duckv1alpha1.Condition{ 
+ Type: KafkaChannelConditionAddressable, + Status: corev1.ConditionTrue, +} + +var deploymentConditionReady = appsv1.DeploymentCondition{ + Type: appsv1.DeploymentAvailable, + Status: corev1.ConditionTrue, +} + +var deploymentConditionNotReady = appsv1.DeploymentCondition{ + Type: appsv1.DeploymentAvailable, + Status: corev1.ConditionFalse, +} + +var deploymentStatusReady = &appsv1.DeploymentStatus{Conditions: []appsv1.DeploymentCondition{deploymentConditionReady}} +var deploymentStatusNotReady = &appsv1.DeploymentStatus{Conditions: []appsv1.DeploymentCondition{deploymentConditionNotReady}} + +var ignoreAllButTypeAndStatus = cmpopts.IgnoreFields( + duckv1alpha1.Condition{}, + "LastTransitionTime", "Message", "Reason", "Severity") + +var ignoreLastTransitionTime = cmpopts.IgnoreFields(duckv1alpha1.Condition{}, "LastTransitionTime") + +func TestChannelGetCondition(t *testing.T) { + tests := []struct { + name string + cs *KafkaChannelStatus + condQuery duckv1alpha1.ConditionType + want *duckv1alpha1.Condition + }{{ + name: "single condition", + cs: &KafkaChannelStatus{ + Status: duckv1alpha1.Status{ + Conditions: []duckv1alpha1.Condition{ + condReady, + }, + }, + }, + condQuery: duckv1alpha1.ConditionReady, + want: &condReady, + }, { + name: "unknown condition", + cs: &KafkaChannelStatus{ + Status: duckv1alpha1.Status{ + Conditions: []duckv1alpha1.Condition{ + condReady, + condDispatcherNotReady, + }, + }, + }, + condQuery: duckv1alpha1.ConditionType("foo"), + want: nil, + }} + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := test.cs.GetCondition(test.condQuery) + if diff := cmp.Diff(test.want, got); diff != "" { + t.Errorf("unexpected condition (-want, +got) = %v", diff) + } + }) + } +} + +func TestChannelInitializeConditions(t *testing.T) { + tests := []struct { + name string + cs *KafkaChannelStatus + want *KafkaChannelStatus + }{{ + name: "empty", + cs: &KafkaChannelStatus{}, + want: &KafkaChannelStatus{ + Status: duckv1alpha1.Status{ 
+ Conditions: []duckv1alpha1.Condition{{ + Type: KafkaChannelConditionAddressable, + Status: corev1.ConditionUnknown, + }, { + Type: KafkaChannelConditionChannelServiceReady, + Status: corev1.ConditionUnknown, + }, { + Type: KafkaChannelConditionDispatcherReady, + Status: corev1.ConditionUnknown, + }, { + Type: KafkaChannelConditionEndpointsReady, + Status: corev1.ConditionUnknown, + }, { + Type: KafkaChannelConditionReady, + Status: corev1.ConditionUnknown, + }, { + Type: KafkaChannelConditionServiceReady, + Status: corev1.ConditionUnknown, + }, { + Type: KafkaChannelConditionTopicReady, + Status: corev1.ConditionUnknown, + }}, + }, + }, + }, { + name: "one false", + cs: &KafkaChannelStatus{ + Status: duckv1alpha1.Status{ + Conditions: []duckv1alpha1.Condition{{ + Type: KafkaChannelConditionDispatcherReady, + Status: corev1.ConditionFalse, + }}, + }, + }, + want: &KafkaChannelStatus{ + Status: duckv1alpha1.Status{ + Conditions: []duckv1alpha1.Condition{{ + Type: KafkaChannelConditionAddressable, + Status: corev1.ConditionUnknown, + }, { + Type: KafkaChannelConditionChannelServiceReady, + Status: corev1.ConditionUnknown, + }, { + Type: KafkaChannelConditionDispatcherReady, + Status: corev1.ConditionFalse, + }, { + Type: KafkaChannelConditionEndpointsReady, + Status: corev1.ConditionUnknown, + }, { + Type: KafkaChannelConditionReady, + Status: corev1.ConditionUnknown, + }, { + Type: KafkaChannelConditionServiceReady, + Status: corev1.ConditionUnknown, + }, { + Type: KafkaChannelConditionTopicReady, + Status: corev1.ConditionUnknown, + }}, + }, + }, + }, { + name: "one true", + cs: &KafkaChannelStatus{ + Status: duckv1alpha1.Status{ + Conditions: []duckv1alpha1.Condition{{ + Type: KafkaChannelConditionDispatcherReady, + Status: corev1.ConditionTrue, + }}, + }, + }, + want: &KafkaChannelStatus{ + Status: duckv1alpha1.Status{ + Conditions: []duckv1alpha1.Condition{{ + Type: KafkaChannelConditionAddressable, + Status: corev1.ConditionUnknown, + }, { + Type: 
KafkaChannelConditionChannelServiceReady, + Status: corev1.ConditionUnknown, + }, { + Type: KafkaChannelConditionDispatcherReady, + Status: corev1.ConditionTrue, + }, { + Type: KafkaChannelConditionEndpointsReady, + Status: corev1.ConditionUnknown, + }, { + Type: KafkaChannelConditionReady, + Status: corev1.ConditionUnknown, + }, { + Type: KafkaChannelConditionServiceReady, + Status: corev1.ConditionUnknown, + }, { + Type: KafkaChannelConditionTopicReady, + Status: corev1.ConditionUnknown, + }}, + }, + }, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + test.cs.InitializeConditions() + if diff := cmp.Diff(test.want, test.cs, ignoreAllButTypeAndStatus); diff != "" { + t.Errorf("unexpected conditions (-want, +got) = %v", diff) + } + }) + } +} + +func TestChannelIsReady(t *testing.T) { + tests := []struct { + name string + markServiceReady bool + markChannelServiceReady bool + setAddress bool + markEndpointsReady bool + markTopicReady bool + wantReady bool + dispatcherStatus *appsv1.DeploymentStatus + }{{ + name: "all happy", + markServiceReady: true, + markChannelServiceReady: true, + markEndpointsReady: true, + dispatcherStatus: deploymentStatusReady, + setAddress: true, + markTopicReady: true, + wantReady: true, + }, { + name: "service not ready", + markServiceReady: false, + markChannelServiceReady: false, + markEndpointsReady: true, + dispatcherStatus: deploymentStatusReady, + setAddress: true, + markTopicReady: true, + wantReady: false, + }, { + name: "endpoints not ready", + markServiceReady: true, + markChannelServiceReady: false, + markEndpointsReady: false, + dispatcherStatus: deploymentStatusReady, + setAddress: true, + markTopicReady: true, + wantReady: false, + }, { + name: "deployment not ready", + markServiceReady: true, + markEndpointsReady: true, + markChannelServiceReady: false, + dispatcherStatus: deploymentStatusNotReady, + setAddress: true, + markTopicReady: true, + wantReady: false, + }, { + name: "address not set", 
+ markServiceReady: true, + markChannelServiceReady: false, + markEndpointsReady: true, + dispatcherStatus: deploymentStatusReady, + setAddress: false, + markTopicReady: true, + wantReady: false, + }, { + name: "channel service not ready", + markServiceReady: true, + markChannelServiceReady: false, + markEndpointsReady: true, + dispatcherStatus: deploymentStatusReady, + setAddress: true, + markTopicReady: true, + wantReady: false, + }, { + name: "topic not ready", + markServiceReady: true, + markChannelServiceReady: true, + markEndpointsReady: true, + dispatcherStatus: deploymentStatusReady, + setAddress: true, + markTopicReady: false, + wantReady: false, + }} + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + cs := &KafkaChannelStatus{} + cs.InitializeConditions() + if test.markServiceReady { + cs.MarkServiceTrue() + } else { + cs.MarkServiceFailed("NotReadyService", "testing") + } + if test.markChannelServiceReady { + cs.MarkChannelServiceTrue() + } else { + cs.MarkChannelServiceFailed("NotReadyChannelService", "testing") + } + if test.setAddress { + cs.SetAddress("foo.bar") + } + if test.markEndpointsReady { + cs.MarkEndpointsTrue() + } else { + cs.MarkEndpointsFailed("NotReadyEndpoints", "testing") + } + if test.dispatcherStatus != nil { + cs.PropagateDispatcherStatus(test.dispatcherStatus) + } else { + cs.MarkDispatcherFailed("NotReadyDispatcher", "testing") + } + if test.markTopicReady { + cs.MarkTopicTrue() + } else { + cs.MarkTopicFailed("NotReadyTopic", "testing") + } + got := cs.IsReady() + if test.wantReady != got { + t.Errorf("unexpected readiness: want %v, got %v", test.wantReady, got) + } + }) + } +} + +func TestKafkaChannelStatus_SetAddressable(t *testing.T) { + testCases := map[string]struct { + domainInternal string + want *KafkaChannelStatus + }{ + "empty string": { + want: &KafkaChannelStatus{ + Status: duckv1alpha1.Status{ + Conditions: []duckv1alpha1.Condition{ + { + Type: KafkaChannelConditionAddressable, + Status: 
corev1.ConditionFalse, + }, + // Note that Ready is here because when the condition is marked False, duck + // automatically sets Ready to false. + { + Type: KafkaChannelConditionReady, + Status: corev1.ConditionFalse, + }, + }, + }, + }, + }, + "has domain": { + domainInternal: "test-domain", + want: &KafkaChannelStatus{ + Address: duckv1alpha1.Addressable{ + Hostname: "test-domain", + }, + Status: duckv1alpha1.Status{ + Conditions: []duckv1alpha1.Condition{ + { + Type: KafkaChannelConditionAddressable, + Status: corev1.ConditionTrue, + }, + }, + }, + }, + }, + } + for n, tc := range testCases { + t.Run(n, func(t *testing.T) { + cs := &KafkaChannelStatus{} + cs.SetAddress(tc.domainInternal) + if diff := cmp.Diff(tc.want, cs, ignoreAllButTypeAndStatus); diff != "" { + t.Errorf("unexpected conditions (-want, +got) = %v", diff) + } + }) + } +} diff --git a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_types.go b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_types.go new file mode 100644 index 00000000000..d8b6f773118 --- /dev/null +++ b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_types.go @@ -0,0 +1,93 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + eventingduck "github.com/knative/eventing/pkg/apis/duck/v1alpha1" + "github.com/knative/pkg/apis" + duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1" + "github.com/knative/pkg/webhook" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// KafkaChannel is a resource representing a Kafka Channel. +type KafkaChannel struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Spec defines the desired state of the Channel. + Spec KafkaChannelSpec `json:"spec,omitempty"` + + // Status represents the current state of the KafkaChannel. This data may be out of + // date. + // +optional + Status KafkaChannelStatus `json:"status,omitempty"` +} + +// Check that Channel can be validated, can be defaulted, and has immutable fields. +var _ apis.Validatable = (*KafkaChannel)(nil) +var _ apis.Defaultable = (*KafkaChannel)(nil) +var _ runtime.Object = (*KafkaChannel)(nil) +var _ webhook.GenericCRD = (*KafkaChannel)(nil) + +// KafkaChannelSpec defines the specification for a KafkaChannel. +type KafkaChannelSpec struct { + // NumPartitions is the number of partitions of a Kafka topic. By default, it is set to 1. + NumPartitions int32 `json:"numPartitions"` + + // ReplicationFactor is the replication factor of a Kafka topic. By default, it is set to 1. + ReplicationFactor int16 `json:"replicationFactor"` + + // KafkaChannel conforms to Duck type Subscribable. + Subscribable *eventingduck.Subscribable `json:"subscribable,omitempty"` +} + +// KafkaChannelStatus represents the current state of a KafkaChannel. +type KafkaChannelStatus struct { + // inherits duck/v1alpha1 Status, which currently provides: + // * ObservedGeneration - the 'Generation' of the Service that was last processed by the controller. 
+ // * Conditions - the latest available observations of a resource's current state. + duckv1alpha1.Status `json:",inline"` + + // KafkaChannel is Addressable. It currently exposes the endpoint as a + // fully-qualified DNS name which will distribute traffic over the + // provided targets from inside the cluster. + // + // It generally has the form {channel}.{namespace}.svc.{cluster domain name} + Address duckv1alpha1.Addressable `json:"address,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// KafkaChannelList is a collection of KafkaChannels. +type KafkaChannelList struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + Items []KafkaChannel `json:"items"` +} + +// GetGroupVersionKind returns GroupVersionKind for KafkaChannels +func (c *KafkaChannel) GetGroupVersionKind() schema.GroupVersionKind { + return SchemeGroupVersion.WithKind("KafkaChannel") +} diff --git a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation.go b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation.go new file mode 100644 index 00000000000..c29e10d2f7b --- /dev/null +++ b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation.go @@ -0,0 +1,52 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + "context" + "fmt" + "github.com/knative/pkg/apis" +) + +func (c *KafkaChannel) Validate(ctx context.Context) *apis.FieldError { + return c.Spec.Validate(ctx).ViaField("spec") +} + +func (cs *KafkaChannelSpec) Validate(ctx context.Context) *apis.FieldError { + var errs *apis.FieldError + + if cs.NumPartitions <= 0 { + fe := apis.ErrInvalidValue(cs.NumPartitions, "numPartitions") + errs = errs.Also(fe) + } + + if cs.ReplicationFactor <= 0 { + fe := apis.ErrInvalidValue(cs.ReplicationFactor, "replicationFactor") + errs = errs.Also(fe) + } + + if cs.Subscribable != nil { + for i, subscriber := range cs.Subscribable.Subscribers { + if subscriber.ReplyURI == "" && subscriber.SubscriberURI == "" { + fe := apis.ErrMissingField("replyURI", "subscriberURI") + fe.Details = "expected at least one of, got none" + errs = errs.Also(fe.ViaField(fmt.Sprintf("subscriber[%d]", i)).ViaField("subscribable")) + } + } + } + return errs +} diff --git a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation_test.go b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation_test.go new file mode 100644 index 00000000000..e5269ad70a5 --- /dev/null +++ b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation_test.go @@ -0,0 +1,134 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + "context" + "github.com/google/go-cmp/cmp" + "github.com/knative/pkg/webhook" + "testing" + + eventingduck "github.com/knative/eventing/pkg/apis/duck/v1alpha1" + "github.com/knative/pkg/apis" +) + +func TestKafkaChannelValidation(t *testing.T) { + testCases := map[string]struct { + cr webhook.GenericCRD + want *apis.FieldError + }{ + "empty spec": { + cr: &KafkaChannel{ + Spec: KafkaChannelSpec{}, + }, + want: func() *apis.FieldError { + var errs *apis.FieldError + fe := apis.ErrInvalidValue(0, "spec.numPartitions") + errs = errs.Also(fe) + fe = apis.ErrInvalidValue(0, "spec.replicationFactor") + errs = errs.Also(fe) + return errs + }(), + }, + "negative numPartitions": { + cr: &KafkaChannel{ + Spec: KafkaChannelSpec{ + NumPartitions: -10, + ReplicationFactor: 1, + }, + }, + want: func() *apis.FieldError { + fe := apis.ErrInvalidValue(-10, "spec.numPartitions") + return fe + }(), + }, + "negative replicationFactor": { + cr: &KafkaChannel{ + Spec: KafkaChannelSpec{ + NumPartitions: 1, + ReplicationFactor: -10, + }, + }, + want: func() *apis.FieldError { + fe := apis.ErrInvalidValue(-10, "spec.replicationFactor") + return fe + }(), + }, + "valid subscribers array": { + cr: &KafkaChannel{ + Spec: KafkaChannelSpec{ + NumPartitions: 1, + ReplicationFactor: 1, + Subscribable: &eventingduck.Subscribable{ + Subscribers: []eventingduck.SubscriberSpec{{ + SubscriberURI: "subscriberendpoint", + ReplyURI: "resultendpoint", + }}, + }}, + }, + want: nil, + }, + "empty subscriber at index 1": { + cr: &KafkaChannel{ + Spec: KafkaChannelSpec{ + NumPartitions: 1, + ReplicationFactor: 1, + Subscribable: &eventingduck.Subscribable{ + Subscribers: []eventingduck.SubscriberSpec{{ + SubscriberURI: "subscriberendpoint", + ReplyURI: "replyendpoint", + }, {}}, + }}, + }, + want: func() *apis.FieldError { + fe := apis.ErrMissingField("spec.subscribable.subscriber[1].replyURI", "spec.subscribable.subscriber[1].subscriberURI") + fe.Details = "expected at 
least one of, got none" + return fe + }(), + }, + "two empty subscribers": { + cr: &KafkaChannel{ + Spec: KafkaChannelSpec{ + NumPartitions: 1, + ReplicationFactor: 1, + Subscribable: &eventingduck.Subscribable{ + Subscribers: []eventingduck.SubscriberSpec{{}, {}}, + }, + }, + }, + want: func() *apis.FieldError { + var errs *apis.FieldError + fe := apis.ErrMissingField("spec.subscribable.subscriber[0].replyURI", "spec.subscribable.subscriber[0].subscriberURI") + fe.Details = "expected at least one of, got none" + errs = errs.Also(fe) + fe = apis.ErrMissingField("spec.subscribable.subscriber[1].replyURI", "spec.subscribable.subscriber[1].subscriberURI") + fe.Details = "expected at least one of, got none" + errs = errs.Also(fe) + return errs + }(), + }, + } + + for n, test := range testCases { + t.Run(n, func(t *testing.T) { + got := test.cr.Validate(context.Background()) + if diff := cmp.Diff(test.want.Error(), got.Error()); diff != "" { + t.Errorf("%s: validate (-want, +got) = %v", n, diff) + } + }) + } +} diff --git a/contrib/kafka/pkg/apis/messaging/v1alpha1/register.go b/contrib/kafka/pkg/apis/messaging/v1alpha1/register.go new file mode 100644 index 00000000000..e320ce74970 --- /dev/null +++ b/contrib/kafka/pkg/apis/messaging/v1alpha1/register.go @@ -0,0 +1,52 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: messaging.GroupName, Version: "v1alpha1"}

// Kind takes an unqualified kind and returns back a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
	return SchemeGroupVersion.WithKind(kind).GroupKind()
}

// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
	return SchemeGroupVersion.WithResource(resource).GroupResource()
}

var (
	// SchemeBuilder collects the registration functions for this group/version.
	SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
	// AddToScheme registers the KafkaChannel types into a runtime.Scheme.
	AddToScheme = SchemeBuilder.AddToScheme
)

// Adds the list of known types to Scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
	scheme.AddKnownTypes(SchemeGroupVersion,
		&KafkaChannel{},
		&KafkaChannelList{},
	)
	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
	return nil
}
// NOTE(review): deepcopy-gen output ("Code generated by deepcopy-gen. DO NOT EDIT.").
// Do not hand-edit; change the API types and regenerate instead.

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KafkaChannel) DeepCopyInto(out *KafkaChannel) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaChannel.
func (in *KafkaChannel) DeepCopy() *KafkaChannel {
	if in == nil {
		return nil
	}
	out := new(KafkaChannel)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *KafkaChannel) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KafkaChannelList) DeepCopyInto(out *KafkaChannelList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	out.ListMeta = in.ListMeta
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]KafkaChannel, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaChannelList.
func (in *KafkaChannelList) DeepCopy() *KafkaChannelList {
	if in == nil {
		return nil
	}
	out := new(KafkaChannelList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *KafkaChannelList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KafkaChannelSpec) DeepCopyInto(out *KafkaChannelSpec) {
	*out = *in
	if in.Subscribable != nil {
		in, out := &in.Subscribable, &out.Subscribable
		*out = new(duckv1alpha1.Subscribable)
		(*in).DeepCopyInto(*out)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaChannelSpec.
func (in *KafkaChannelSpec) DeepCopy() *KafkaChannelSpec {
	if in == nil {
		return nil
	}
	out := new(KafkaChannelSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KafkaChannelStatus) DeepCopyInto(out *KafkaChannelStatus) {
	*out = *in
	in.Status.DeepCopyInto(&out.Status)
	in.Address.DeepCopyInto(&out.Address)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaChannelStatus.
func (in *KafkaChannelStatus) DeepCopy() *KafkaChannelStatus {
	if in == nil {
		return nil
	}
	out := new(KafkaChannelStatus)
	in.DeepCopyInto(out)
	return out
}
// NOTE(review): client-gen output ("Code generated by client-gen. DO NOT EDIT.").
// Do not hand-edit; regenerate instead.

// Interface is the set of typed clients exposed by this clientset.
type Interface interface {
	Discovery() discovery.DiscoveryInterface
	MessagingV1alpha1() messagingv1alpha1.MessagingV1alpha1Interface
	// Deprecated: please explicitly pick a version if possible.
	Messaging() messagingv1alpha1.MessagingV1alpha1Interface
}

// Clientset contains the clients for groups. Each group has exactly one
// version included in a Clientset.
type Clientset struct {
	*discovery.DiscoveryClient
	messagingV1alpha1 *messagingv1alpha1.MessagingV1alpha1Client
}

// MessagingV1alpha1 retrieves the MessagingV1alpha1Client
func (c *Clientset) MessagingV1alpha1() messagingv1alpha1.MessagingV1alpha1Interface {
	return c.messagingV1alpha1
}

// Deprecated: Messaging retrieves the default version of MessagingClient.
// Please explicitly pick a version.
func (c *Clientset) Messaging() messagingv1alpha1.MessagingV1alpha1Interface {
	return c.messagingV1alpha1
}

// Discovery retrieves the DiscoveryClient
func (c *Clientset) Discovery() discovery.DiscoveryInterface {
	if c == nil {
		return nil
	}
	return c.DiscoveryClient
}

// NewForConfig creates a new Clientset for the given config.
func NewForConfig(c *rest.Config) (*Clientset, error) {
	// Shallow-copy so the rate-limiter default does not mutate the caller's config.
	configShallowCopy := *c
	if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 {
		configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst)
	}
	var cs Clientset
	var err error
	cs.messagingV1alpha1, err = messagingv1alpha1.NewForConfig(&configShallowCopy)
	if err != nil {
		return nil, err
	}

	cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy)
	if err != nil {
		return nil, err
	}
	return &cs, nil
}

// NewForConfigOrDie creates a new Clientset for the given config and
// panics if there is an error in the config.
func NewForConfigOrDie(c *rest.Config) *Clientset {
	var cs Clientset
	cs.messagingV1alpha1 = messagingv1alpha1.NewForConfigOrDie(c)

	cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c)
	return &cs
}

// New creates a new Clientset for the given RESTClient.
func New(c rest.Interface) *Clientset {
	var cs Clientset
	cs.messagingV1alpha1 = messagingv1alpha1.New(c)

	cs.DiscoveryClient = discovery.NewDiscoveryClient(c)
	return &cs
}
// NOTE(review): client-gen fake output ("Code generated by client-gen. DO NOT EDIT.").
// Do not hand-edit; regenerate instead.

// NewSimpleClientset returns a clientset that will respond with the provided objects.
// It's backed by a very simple object tracker that processes creates, updates and deletions as-is,
// without applying any validations and/or defaults. It shouldn't be considered a replacement
// for a real clientset and is mostly useful in simple unit tests.
func NewSimpleClientset(objects ...runtime.Object) *Clientset {
	o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder())
	for _, obj := range objects {
		if err := o.Add(obj); err != nil {
			panic(err)
		}
	}

	cs := &Clientset{}
	cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake}
	cs.AddReactor("*", "*", testing.ObjectReaction(o))
	cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) {
		gvr := action.GetResource()
		ns := action.GetNamespace()
		watch, err := o.Watch(gvr, ns)
		if err != nil {
			return false, nil, err
		}
		return true, watch, nil
	})

	return cs
}

// Clientset implements clientset.Interface. Meant to be embedded into a
// struct to get a default implementation. This makes faking out just the method
// you want to test easier.
type Clientset struct {
	testing.Fake
	discovery *fakediscovery.FakeDiscovery
}

// Discovery returns the fake discovery client backing this clientset.
func (c *Clientset) Discovery() discovery.DiscoveryInterface {
	return c.discovery
}

// Compile-time check that the fake satisfies the real clientset interface.
var _ clientset.Interface = &Clientset{}

// MessagingV1alpha1 retrieves the MessagingV1alpha1Client
func (c *Clientset) MessagingV1alpha1() messagingv1alpha1.MessagingV1alpha1Interface {
	return &fakemessagingv1alpha1.FakeMessagingV1alpha1{Fake: &c.Fake}
}

// Messaging retrieves the MessagingV1alpha1Client
func (c *Clientset) Messaging() messagingv1alpha1.MessagingV1alpha1Interface {
	return &fakemessagingv1alpha1.FakeMessagingV1alpha1{Fake: &c.Fake}
}
file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated fake clientset. +package fake diff --git a/contrib/kafka/pkg/client/clientset/versioned/fake/register.go b/contrib/kafka/pkg/client/clientset/versioned/fake/register.go new file mode 100644 index 00000000000..d8716c25725 --- /dev/null +++ b/contrib/kafka/pkg/client/clientset/versioned/fake/register.go @@ -0,0 +1,56 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
// NOTE(review): client-gen output ("Code generated by client-gen. DO NOT EDIT.").
// Do not hand-edit; regenerate instead.

// scheme holds the types known to the fake clientset's object tracker.
var scheme = runtime.NewScheme()

// codecs supplies the decoder used by the fake object tracker.
var codecs = serializer.NewCodecFactory(scheme)

// parameterCodec encodes/decodes versioned list/get parameters.
var parameterCodec = runtime.NewParameterCodec(scheme)
var localSchemeBuilder = runtime.SchemeBuilder{
	messagingv1alpha1.AddToScheme,
}

// AddToScheme adds all types of this clientset into the given scheme. This allows composition
// of clientsets, like in:
//
//   import (
//     "k8s.io/client-go/kubernetes"
//     clientsetscheme "k8s.io/client-go/kubernetes/scheme"
//     aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
//   )
//
//   kclientset, _ := kubernetes.NewForConfig(c)
//   _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
//
// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
// correctly.
var AddToScheme = localSchemeBuilder.AddToScheme

func init() {
	v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"})
	utilruntime.Must(AddToScheme(scheme))
}
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package contains the scheme of the automatically generated clientset. +package scheme diff --git a/contrib/kafka/pkg/client/clientset/versioned/scheme/register.go b/contrib/kafka/pkg/client/clientset/versioned/scheme/register.go new file mode 100644 index 00000000000..655d74d7620 --- /dev/null +++ b/contrib/kafka/pkg/client/clientset/versioned/scheme/register.go @@ -0,0 +1,56 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
// NOTE(review): client-gen output ("Code generated by client-gen. DO NOT EDIT.").
// Do not hand-edit; regenerate instead.

// Scheme holds the types registered for this clientset.
var Scheme = runtime.NewScheme()

// Codecs supplies the serializers for Scheme.
var Codecs = serializer.NewCodecFactory(Scheme)

// ParameterCodec encodes/decodes versioned request parameters.
var ParameterCodec = runtime.NewParameterCodec(Scheme)
var localSchemeBuilder = runtime.SchemeBuilder{
	messagingv1alpha1.AddToScheme,
}

// AddToScheme adds all types of this clientset into the given scheme. This allows composition
// of clientsets, like in:
//
//   import (
//     "k8s.io/client-go/kubernetes"
//     clientsetscheme "k8s.io/client-go/kubernetes/scheme"
//     aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
//   )
//
//   kclientset, _ := kubernetes.NewForConfig(c)
//   _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
//
// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
// correctly.
var AddToScheme = localSchemeBuilder.AddToScheme

func init() {
	v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"})
	utilruntime.Must(AddToScheme(Scheme))
}
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1alpha1 diff --git a/contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1/fake/doc.go b/contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1/fake/doc.go new file mode 100644 index 00000000000..a00e5d7b21a --- /dev/null +++ b/contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. 
// NOTE(review): client-gen fake output ("Code generated by client-gen. DO NOT EDIT.").
// Do not hand-edit; regenerate instead.

// FakeKafkaChannels implements KafkaChannelInterface
type FakeKafkaChannels struct {
	Fake *FakeMessagingV1alpha1
	ns   string
}

var kafkachannelsResource = schema.GroupVersionResource{Group: "messaging.knative.dev", Version: "v1alpha1", Resource: "kafkachannels"}

var kafkachannelsKind = schema.GroupVersionKind{Group: "messaging.knative.dev", Version: "v1alpha1", Kind: "KafkaChannel"}

// Get takes name of the kafkaChannel, and returns the corresponding kafkaChannel object, and an error if there is any.
func (c *FakeKafkaChannels) Get(name string, options v1.GetOptions) (result *v1alpha1.KafkaChannel, err error) {
	obj, err := c.Fake.
		Invokes(testing.NewGetAction(kafkachannelsResource, c.ns, name), &v1alpha1.KafkaChannel{})

	if obj == nil {
		return nil, err
	}
	return obj.(*v1alpha1.KafkaChannel), err
}

// List takes label and field selectors, and returns the list of KafkaChannels that match those selectors.
func (c *FakeKafkaChannels) List(opts v1.ListOptions) (result *v1alpha1.KafkaChannelList, err error) {
	obj, err := c.Fake.
		Invokes(testing.NewListAction(kafkachannelsResource, kafkachannelsKind, c.ns, opts), &v1alpha1.KafkaChannelList{})

	if obj == nil {
		return nil, err
	}

	// Re-apply the label selector client-side, mirroring apiserver filtering.
	label, _, _ := testing.ExtractFromListOptions(opts)
	if label == nil {
		label = labels.Everything()
	}
	list := &v1alpha1.KafkaChannelList{ListMeta: obj.(*v1alpha1.KafkaChannelList).ListMeta}
	for _, item := range obj.(*v1alpha1.KafkaChannelList).Items {
		if label.Matches(labels.Set(item.Labels)) {
			list.Items = append(list.Items, item)
		}
	}
	return list, err
}

// Watch returns a watch.Interface that watches the requested kafkaChannels.
func (c *FakeKafkaChannels) Watch(opts v1.ListOptions) (watch.Interface, error) {
	return c.Fake.
		InvokesWatch(testing.NewWatchAction(kafkachannelsResource, c.ns, opts))

}

// Create takes the representation of a kafkaChannel and creates it. Returns the server's representation of the kafkaChannel, and an error, if there is any.
func (c *FakeKafkaChannels) Create(kafkaChannel *v1alpha1.KafkaChannel) (result *v1alpha1.KafkaChannel, err error) {
	obj, err := c.Fake.
		Invokes(testing.NewCreateAction(kafkachannelsResource, c.ns, kafkaChannel), &v1alpha1.KafkaChannel{})

	if obj == nil {
		return nil, err
	}
	return obj.(*v1alpha1.KafkaChannel), err
}

// Update takes the representation of a kafkaChannel and updates it. Returns the server's representation of the kafkaChannel, and an error, if there is any.
func (c *FakeKafkaChannels) Update(kafkaChannel *v1alpha1.KafkaChannel) (result *v1alpha1.KafkaChannel, err error) {
	obj, err := c.Fake.
		Invokes(testing.NewUpdateAction(kafkachannelsResource, c.ns, kafkaChannel), &v1alpha1.KafkaChannel{})

	if obj == nil {
		return nil, err
	}
	return obj.(*v1alpha1.KafkaChannel), err
}

// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *FakeKafkaChannels) UpdateStatus(kafkaChannel *v1alpha1.KafkaChannel) (*v1alpha1.KafkaChannel, error) {
	obj, err := c.Fake.
		Invokes(testing.NewUpdateSubresourceAction(kafkachannelsResource, "status", c.ns, kafkaChannel), &v1alpha1.KafkaChannel{})

	if obj == nil {
		return nil, err
	}
	return obj.(*v1alpha1.KafkaChannel), err
}

// Delete takes name of the kafkaChannel and deletes it. Returns an error if one occurs.
func (c *FakeKafkaChannels) Delete(name string, options *v1.DeleteOptions) error {
	_, err := c.Fake.
		Invokes(testing.NewDeleteAction(kafkachannelsResource, c.ns, name), &v1alpha1.KafkaChannel{})

	return err
}

// DeleteCollection deletes a collection of objects.
func (c *FakeKafkaChannels) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
	action := testing.NewDeleteCollectionAction(kafkachannelsResource, c.ns, listOptions)

	_, err := c.Fake.Invokes(action, &v1alpha1.KafkaChannelList{})
	return err
}

// Patch applies the patch and returns the patched kafkaChannel.
func (c *FakeKafkaChannels) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.KafkaChannel, err error) {
	obj, err := c.Fake.
		Invokes(testing.NewPatchSubresourceAction(kafkachannelsResource, c.ns, name, data, subresources...), &v1alpha1.KafkaChannel{})

	if obj == nil {
		return nil, err
	}
	return obj.(*v1alpha1.KafkaChannel), err
}

// --- fake_messaging_client.go (continues in the next chunk span) ---

// FakeMessagingV1alpha1 is the fake group client; it dispatches through the
// embedded testing.Fake's reactor chain.
type FakeMessagingV1alpha1 struct {
	*testing.Fake
}

// KafkaChannels returns a fake namespaced KafkaChannel client.
func (c *FakeMessagingV1alpha1) KafkaChannels(namespace string) v1alpha1.KafkaChannelInterface {
	return &FakeKafkaChannels{c, namespace}
}
// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
func (c *FakeMessagingV1alpha1) RESTClient() rest.Interface {
	// The fake never talks to a server; a typed nil RESTClient is returned.
	var ret *rest.RESTClient
	return ret
}

// --- generated_expansion.go ---

// KafkaChannelExpansion is the (empty) hook for hand-written client extensions.
type KafkaChannelExpansion interface{}
// NOTE(review): client-gen output ("Code generated by client-gen. DO NOT EDIT.").
// Do not hand-edit; regenerate instead. The trailing DeleteCollection body is
// outside this chunk and is left untouched.

// KafkaChannelsGetter has a method to return a KafkaChannelInterface.
// A group's client should implement this interface.
type KafkaChannelsGetter interface {
	KafkaChannels(namespace string) KafkaChannelInterface
}

// KafkaChannelInterface has methods to work with KafkaChannel resources.
type KafkaChannelInterface interface {
	Create(*v1alpha1.KafkaChannel) (*v1alpha1.KafkaChannel, error)
	Update(*v1alpha1.KafkaChannel) (*v1alpha1.KafkaChannel, error)
	UpdateStatus(*v1alpha1.KafkaChannel) (*v1alpha1.KafkaChannel, error)
	Delete(name string, options *v1.DeleteOptions) error
	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
	Get(name string, options v1.GetOptions) (*v1alpha1.KafkaChannel, error)
	List(opts v1.ListOptions) (*v1alpha1.KafkaChannelList, error)
	Watch(opts v1.ListOptions) (watch.Interface, error)
	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.KafkaChannel, err error)
	KafkaChannelExpansion
}

// kafkaChannels implements KafkaChannelInterface
type kafkaChannels struct {
	client rest.Interface
	ns     string
}

// newKafkaChannels returns a KafkaChannels
func newKafkaChannels(c *MessagingV1alpha1Client, namespace string) *kafkaChannels {
	return &kafkaChannels{
		client: c.RESTClient(),
		ns:     namespace,
	}
}

// Get takes name of the kafkaChannel, and returns the corresponding kafkaChannel object, and an error if there is any.
func (c *kafkaChannels) Get(name string, options v1.GetOptions) (result *v1alpha1.KafkaChannel, err error) {
	result = &v1alpha1.KafkaChannel{}
	err = c.client.Get().
		Namespace(c.ns).
		Resource("kafkachannels").
		Name(name).
		VersionedParams(&options, scheme.ParameterCodec).
		Do().
		Into(result)
	return
}

// List takes label and field selectors, and returns the list of KafkaChannels that match those selectors.
func (c *kafkaChannels) List(opts v1.ListOptions) (result *v1alpha1.KafkaChannelList, err error) {
	result = &v1alpha1.KafkaChannelList{}
	err = c.client.Get().
		Namespace(c.ns).
		Resource("kafkachannels").
		VersionedParams(&opts, scheme.ParameterCodec).
		Do().
		Into(result)
	return
}

// Watch returns a watch.Interface that watches the requested kafkaChannels.
func (c *kafkaChannels) Watch(opts v1.ListOptions) (watch.Interface, error) {
	opts.Watch = true
	return c.client.Get().
		Namespace(c.ns).
		Resource("kafkachannels").
		VersionedParams(&opts, scheme.ParameterCodec).
		Watch()
}

// Create takes the representation of a kafkaChannel and creates it. Returns the server's representation of the kafkaChannel, and an error, if there is any.
func (c *kafkaChannels) Create(kafkaChannel *v1alpha1.KafkaChannel) (result *v1alpha1.KafkaChannel, err error) {
	result = &v1alpha1.KafkaChannel{}
	err = c.client.Post().
		Namespace(c.ns).
		Resource("kafkachannels").
		Body(kafkaChannel).
		Do().
		Into(result)
	return
}

// Update takes the representation of a kafkaChannel and updates it. Returns the server's representation of the kafkaChannel, and an error, if there is any.
func (c *kafkaChannels) Update(kafkaChannel *v1alpha1.KafkaChannel) (result *v1alpha1.KafkaChannel, err error) {
	result = &v1alpha1.KafkaChannel{}
	err = c.client.Put().
		Namespace(c.ns).
		Resource("kafkachannels").
		Name(kafkaChannel.Name).
		Body(kafkaChannel).
		Do().
		Into(result)
	return
}

// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().

func (c *kafkaChannels) UpdateStatus(kafkaChannel *v1alpha1.KafkaChannel) (result *v1alpha1.KafkaChannel, err error) {
	result = &v1alpha1.KafkaChannel{}
	err = c.client.Put().
		Namespace(c.ns).
		Resource("kafkachannels").
		Name(kafkaChannel.Name).
		SubResource("status").
		Body(kafkaChannel).
		Do().
		Into(result)
	return
}

// Delete takes name of the kafkaChannel and deletes it. Returns an error if one occurs.
func (c *kafkaChannels) Delete(name string, options *v1.DeleteOptions) error {
	return c.client.Delete().
		Namespace(c.ns).
		Resource("kafkachannels").
		Name(name).
		Body(options).
		Do().
		Error()
}

// DeleteCollection deletes a collection of objects.
+func (c *kafkaChannels) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("kafkachannels"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched kafkaChannel. +func (c *kafkaChannels) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.KafkaChannel, err error) { + result = &v1alpha1.KafkaChannel{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("kafkachannels"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1/messaging_client.go b/contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1/messaging_client.go new file mode 100644 index 00000000000..203bea4573b --- /dev/null +++ b/contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1/messaging_client.go @@ -0,0 +1,90 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + v1alpha1 "github.com/knative/eventing/contrib/kafka/pkg/apis/messaging/v1alpha1" + "github.com/knative/eventing/contrib/kafka/pkg/client/clientset/versioned/scheme" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + rest "k8s.io/client-go/rest" +) + +type MessagingV1alpha1Interface interface { + RESTClient() rest.Interface + KafkaChannelsGetter +} + +// MessagingV1alpha1Client is used to interact with features provided by the messaging.knative.dev group. +type MessagingV1alpha1Client struct { + restClient rest.Interface +} + +func (c *MessagingV1alpha1Client) KafkaChannels(namespace string) KafkaChannelInterface { + return newKafkaChannels(c, namespace) +} + +// NewForConfig creates a new MessagingV1alpha1Client for the given config. +func NewForConfig(c *rest.Config) (*MessagingV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &MessagingV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new MessagingV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *MessagingV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new MessagingV1alpha1Client for the given RESTClient. 
+func New(c rest.Interface) *MessagingV1alpha1Client { + return &MessagingV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *MessagingV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/contrib/kafka/pkg/client/informers/externalversions/factory.go b/contrib/kafka/pkg/client/informers/externalversions/factory.go new file mode 100644 index 00000000000..66992469f3f --- /dev/null +++ b/contrib/kafka/pkg/client/informers/externalversions/factory.go @@ -0,0 +1,180 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. 
+ +package externalversions + +import ( + reflect "reflect" + sync "sync" + time "time" + + versioned "github.com/knative/eventing/contrib/kafka/pkg/client/clientset/versioned" + internalinterfaces "github.com/knative/eventing/contrib/kafka/pkg/client/informers/externalversions/internalinterfaces" + messaging "github.com/knative/eventing/contrib/kafka/pkg/client/informers/externalversions/messaging" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// SharedInformerOption defines the functional option type for SharedInformerFactory. +type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory + +type sharedInformerFactory struct { + client versioned.Interface + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc + lock sync.Mutex + defaultResync time.Duration + customResync map[reflect.Type]time.Duration + + informers map[reflect.Type]cache.SharedIndexInformer + // startedInformers is used for tracking which informers have been started. + // This allows Start() to be called multiple times safely. + startedInformers map[reflect.Type]bool +} + +// WithCustomResyncConfig sets a custom resync period for the specified informer types. +func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + for k, v := range resyncConfig { + factory.customResync[reflect.TypeOf(k)] = v + } + return factory + } +} + +// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory. 
+func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.tweakListOptions = tweakListOptions + return factory + } +} + +// WithNamespace limits the SharedInformerFactory to the specified namespace. +func WithNamespace(namespace string) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.namespace = namespace + return factory + } +} + +// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. +func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync) +} + +// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory. +// Listers obtained via this SharedInformerFactory will be subject to the same filters +// as specified here. +// Deprecated: Please use NewSharedInformerFactoryWithOptions instead +func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions)) +} + +// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options. 
+func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory { + factory := &sharedInformerFactory{ + client: client, + namespace: v1.NamespaceAll, + defaultResync: defaultResync, + informers: make(map[reflect.Type]cache.SharedIndexInformer), + startedInformers: make(map[reflect.Type]bool), + customResync: make(map[reflect.Type]time.Duration), + } + + // Apply all options + for _, opt := range options { + factory = opt(factory) + } + + return factory +} + +// Start initializes all requested informers. +func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) { + f.lock.Lock() + defer f.lock.Unlock() + + for informerType, informer := range f.informers { + if !f.startedInformers[informerType] { + go informer.Run(stopCh) + f.startedInformers[informerType] = true + } + } +} + +// WaitForCacheSync waits for all started informers' cache were synced. +func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool { + informers := func() map[reflect.Type]cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informers := map[reflect.Type]cache.SharedIndexInformer{} + for informerType, informer := range f.informers { + if f.startedInformers[informerType] { + informers[informerType] = informer + } + } + return informers + }() + + res := map[reflect.Type]bool{} + for informType, informer := range informers { + res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced) + } + return res +} + +// InternalInformerFor returns the SharedIndexInformer for obj using an internal +// client. 
+func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informerType := reflect.TypeOf(obj) + informer, exists := f.informers[informerType] + if exists { + return informer + } + + resyncPeriod, exists := f.customResync[informerType] + if !exists { + resyncPeriod = f.defaultResync + } + + informer = newFunc(f.client, resyncPeriod) + f.informers[informerType] = informer + + return informer +} + +// SharedInformerFactory provides shared informers for resources in all known +// API group versions. +type SharedInformerFactory interface { + internalinterfaces.SharedInformerFactory + ForResource(resource schema.GroupVersionResource) (GenericInformer, error) + WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool + + Messaging() messaging.Interface +} + +func (f *sharedInformerFactory) Messaging() messaging.Interface { + return messaging.New(f, f.namespace, f.tweakListOptions) +} diff --git a/contrib/kafka/pkg/client/informers/externalversions/generic.go b/contrib/kafka/pkg/client/informers/externalversions/generic.go new file mode 100644 index 00000000000..761bf80c064 --- /dev/null +++ b/contrib/kafka/pkg/client/informers/externalversions/generic.go @@ -0,0 +1,62 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. 
+ +package externalversions + +import ( + "fmt" + + v1alpha1 "github.com/knative/eventing/contrib/kafka/pkg/apis/messaging/v1alpha1" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// GenericInformer is type of SharedIndexInformer which will locate and delegate to other +// sharedInformers based on type +type GenericInformer interface { + Informer() cache.SharedIndexInformer + Lister() cache.GenericLister +} + +type genericInformer struct { + informer cache.SharedIndexInformer + resource schema.GroupResource +} + +// Informer returns the SharedIndexInformer. +func (f *genericInformer) Informer() cache.SharedIndexInformer { + return f.informer +} + +// Lister returns the GenericLister. +func (f *genericInformer) Lister() cache.GenericLister { + return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource) +} + +// ForResource gives generic access to a shared informer of the matching type +// TODO extend this to unknown resources with a client pool +func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { + switch resource { + // Group=messaging.knative.dev, Version=v1alpha1 + case v1alpha1.SchemeGroupVersion.WithResource("kafkachannels"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Messaging().V1alpha1().KafkaChannels().Informer()}, nil + + } + + return nil, fmt.Errorf("no informer found for %v", resource) +} diff --git a/contrib/kafka/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go b/contrib/kafka/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go new file mode 100644 index 00000000000..644293f3f79 --- /dev/null +++ b/contrib/kafka/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -0,0 +1,38 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in 
compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package internalinterfaces + +import ( + time "time" + + versioned "github.com/knative/eventing/contrib/kafka/pkg/client/clientset/versioned" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + cache "k8s.io/client-go/tools/cache" +) + +type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer + +// SharedInformerFactory a small interface to allow for adding an informer without an import cycle +type SharedInformerFactory interface { + Start(stopCh <-chan struct{}) + InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer +} + +type TweakListOptionsFunc func(*v1.ListOptions) diff --git a/contrib/kafka/pkg/client/informers/externalversions/messaging/interface.go b/contrib/kafka/pkg/client/informers/externalversions/messaging/interface.go new file mode 100644 index 00000000000..2a2a4e5ecfc --- /dev/null +++ b/contrib/kafka/pkg/client/informers/externalversions/messaging/interface.go @@ -0,0 +1,46 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package messaging + +import ( + internalinterfaces "github.com/knative/eventing/contrib/kafka/pkg/client/informers/externalversions/internalinterfaces" + v1alpha1 "github.com/knative/eventing/contrib/kafka/pkg/client/informers/externalversions/messaging/v1alpha1" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1alpha1 provides access to shared informers for resources in V1alpha1. + V1alpha1() v1alpha1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1alpha1 returns a new v1alpha1.Interface. +func (g *group) V1alpha1() v1alpha1.Interface { + return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/contrib/kafka/pkg/client/informers/externalversions/messaging/v1alpha1/interface.go b/contrib/kafka/pkg/client/informers/externalversions/messaging/v1alpha1/interface.go new file mode 100644 index 00000000000..9e09a032739 --- /dev/null +++ b/contrib/kafka/pkg/client/informers/externalversions/messaging/v1alpha1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + internalinterfaces "github.com/knative/eventing/contrib/kafka/pkg/client/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // KafkaChannels returns a KafkaChannelInformer. + KafkaChannels() KafkaChannelInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// KafkaChannels returns a KafkaChannelInformer. +func (v *version) KafkaChannels() KafkaChannelInformer { + return &kafkaChannelInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/contrib/kafka/pkg/client/informers/externalversions/messaging/v1alpha1/kafkachannel.go b/contrib/kafka/pkg/client/informers/externalversions/messaging/v1alpha1/kafkachannel.go new file mode 100644 index 00000000000..ddd6ae98f8c --- /dev/null +++ b/contrib/kafka/pkg/client/informers/externalversions/messaging/v1alpha1/kafkachannel.go @@ -0,0 +1,89 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + time "time" + + messagingv1alpha1 "github.com/knative/eventing/contrib/kafka/pkg/apis/messaging/v1alpha1" + versioned "github.com/knative/eventing/contrib/kafka/pkg/client/clientset/versioned" + internalinterfaces "github.com/knative/eventing/contrib/kafka/pkg/client/informers/externalversions/internalinterfaces" + v1alpha1 "github.com/knative/eventing/contrib/kafka/pkg/client/listers/messaging/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// KafkaChannelInformer provides access to a shared informer and lister for +// KafkaChannels. +type KafkaChannelInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.KafkaChannelLister +} + +type kafkaChannelInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewKafkaChannelInformer constructs a new informer for KafkaChannel type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewKafkaChannelInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredKafkaChannelInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredKafkaChannelInformer constructs a new informer for KafkaChannel type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredKafkaChannelInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.MessagingV1alpha1().KafkaChannels(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.MessagingV1alpha1().KafkaChannels(namespace).Watch(options) + }, + }, + &messagingv1alpha1.KafkaChannel{}, + resyncPeriod, + indexers, + ) +} + +func (f *kafkaChannelInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredKafkaChannelInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *kafkaChannelInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&messagingv1alpha1.KafkaChannel{}, f.defaultInformer) +} + +func (f *kafkaChannelInformer) Lister() v1alpha1.KafkaChannelLister { + return v1alpha1.NewKafkaChannelLister(f.Informer().GetIndexer()) +} diff --git a/contrib/kafka/pkg/client/listers/messaging/v1alpha1/expansion_generated.go 
b/contrib/kafka/pkg/client/listers/messaging/v1alpha1/expansion_generated.go new file mode 100644 index 00000000000..d45c47feb0d --- /dev/null +++ b/contrib/kafka/pkg/client/listers/messaging/v1alpha1/expansion_generated.go @@ -0,0 +1,27 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +// KafkaChannelListerExpansion allows custom methods to be added to +// KafkaChannelLister. +type KafkaChannelListerExpansion interface{} + +// KafkaChannelNamespaceListerExpansion allows custom methods to be added to +// KafkaChannelNamespaceLister. +type KafkaChannelNamespaceListerExpansion interface{} diff --git a/contrib/kafka/pkg/client/listers/messaging/v1alpha1/kafkachannel.go b/contrib/kafka/pkg/client/listers/messaging/v1alpha1/kafkachannel.go new file mode 100644 index 00000000000..2401d9dbfe2 --- /dev/null +++ b/contrib/kafka/pkg/client/listers/messaging/v1alpha1/kafkachannel.go @@ -0,0 +1,94 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "github.com/knative/eventing/contrib/kafka/pkg/apis/messaging/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// KafkaChannelLister helps list KafkaChannels. +type KafkaChannelLister interface { + // List lists all KafkaChannels in the indexer. + List(selector labels.Selector) (ret []*v1alpha1.KafkaChannel, err error) + // KafkaChannels returns an object that can list and get KafkaChannels. + KafkaChannels(namespace string) KafkaChannelNamespaceLister + KafkaChannelListerExpansion +} + +// kafkaChannelLister implements the KafkaChannelLister interface. +type kafkaChannelLister struct { + indexer cache.Indexer +} + +// NewKafkaChannelLister returns a new KafkaChannelLister. +func NewKafkaChannelLister(indexer cache.Indexer) KafkaChannelLister { + return &kafkaChannelLister{indexer: indexer} +} + +// List lists all KafkaChannels in the indexer. +func (s *kafkaChannelLister) List(selector labels.Selector) (ret []*v1alpha1.KafkaChannel, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.KafkaChannel)) + }) + return ret, err +} + +// KafkaChannels returns an object that can list and get KafkaChannels. +func (s *kafkaChannelLister) KafkaChannels(namespace string) KafkaChannelNamespaceLister { + return kafkaChannelNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// KafkaChannelNamespaceLister helps list and get KafkaChannels. +type KafkaChannelNamespaceLister interface { + // List lists all KafkaChannels in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1alpha1.KafkaChannel, err error) + // Get retrieves the KafkaChannel from the indexer for a given namespace and name. 
+ Get(name string) (*v1alpha1.KafkaChannel, error) + KafkaChannelNamespaceListerExpansion +} + +// kafkaChannelNamespaceLister implements the KafkaChannelNamespaceLister +// interface. +type kafkaChannelNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all KafkaChannels in the indexer for a given namespace. +func (s kafkaChannelNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.KafkaChannel, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.KafkaChannel)) + }) + return ret, err +} + +// Get retrieves the KafkaChannel from the indexer for a given namespace and name. +func (s kafkaChannelNamespaceLister) Get(name string) (*v1alpha1.KafkaChannel, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("kafkachannel"), name) + } + return obj.(*v1alpha1.KafkaChannel), nil +} diff --git a/contrib/kafka/pkg/controller/channel/provider.go b/contrib/kafka/pkg/controller/channel/provider.go index 73eab2e8d22..b55a956c9a1 100644 --- a/contrib/kafka/pkg/controller/channel/provider.go +++ b/contrib/kafka/pkg/controller/channel/provider.go @@ -30,6 +30,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/source" common "github.com/knative/eventing/contrib/kafka/pkg/controller" + "github.com/knative/eventing/contrib/kafka/pkg/utils" eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" "github.com/knative/pkg/system" ) @@ -51,7 +52,7 @@ type reconciler struct { client client.Client recorder record.EventRecorder logger *zap.Logger - config *common.KafkaProvisionerConfig + config *utils.KafkaConfig // Using a shared kafkaClusterAdmin does not work currently because of an issue with // Shopify/sarama, see https://github.com/Shopify/sarama/issues/1162. 
kafkaClusterAdmin sarama.ClusterAdmin @@ -61,7 +62,7 @@ type reconciler struct { var _ reconcile.Reconciler = &reconciler{} // ProvideController returns a Channel controller. -func ProvideController(mgr manager.Manager, config *common.KafkaProvisionerConfig, logger *zap.Logger) (controller.Controller, error) { +func ProvideController(mgr manager.Manager, config *utils.KafkaConfig, logger *zap.Logger) (controller.Controller, error) { // Setup a new controller to Reconcile Channel. c, err := controller.New(controllerAgentName, mgr, controller.Options{ Reconciler: &reconciler{ diff --git a/contrib/kafka/pkg/controller/channel/reconcile.go b/contrib/kafka/pkg/controller/channel/reconcile.go index 42ed5c92d8c..c2ac661d2d5 100644 --- a/contrib/kafka/pkg/controller/channel/reconcile.go +++ b/contrib/kafka/pkg/controller/channel/reconcile.go @@ -20,6 +20,7 @@ import ( "context" "encoding/json" "fmt" + "github.com/knative/eventing/contrib/kafka/pkg/utils" "github.com/Shopify/sarama" "go.uber.org/zap" @@ -39,12 +40,6 @@ import ( const ( finalizerName = controllerAgentName - // DefaultNumPartitions defines the default number of partitions - DefaultNumPartitions = 1 - - // DefaultReplicationFactor defines the default number of replications - DefaultReplicationFactor = 1 - // Name of the corev1.Events emitted from the reconciliation process dispatcherReconcileFailed = "DispatcherReconcileFailed" dispatcherUpdateStatusFailed = "DispatcherUpdateStatusFailed" @@ -189,7 +184,7 @@ func (r *reconciler) shouldReconcile(channel *eventingv1alpha1.Channel, clusterC } func (r *reconciler) provisionChannel(channel *eventingv1alpha1.Channel, kafkaClusterAdmin sarama.ClusterAdmin) error { - topicName := topicUtils.TopicName(controller.KafkaChannelSeparator, channel.Namespace, channel.Name) + topicName := topicUtils.TopicName(utils.KafkaChannelSeparator, channel.Namespace, channel.Name) r.logger.Info("creating topic on kafka cluster", zap.String("topic", topicName)) var arguments channelArgs 
@@ -203,11 +198,11 @@ func (r *reconciler) provisionChannel(channel *eventingv1alpha1.Channel, kafkaCl } if arguments.NumPartitions == 0 { - arguments.NumPartitions = DefaultNumPartitions + arguments.NumPartitions = utils.DefaultNumPartitions } if arguments.ReplicationFactor == 0 { - arguments.ReplicationFactor = DefaultReplicationFactor + arguments.ReplicationFactor = utils.DefaultReplicationFactor } err := kafkaClusterAdmin.CreateTopic(topicName, &sarama.TopicDetail{ @@ -225,7 +220,7 @@ func (r *reconciler) provisionChannel(channel *eventingv1alpha1.Channel, kafkaCl } func (r *reconciler) deprovisionChannel(channel *eventingv1alpha1.Channel, kafkaClusterAdmin sarama.ClusterAdmin) error { - topicName := topicUtils.TopicName(controller.KafkaChannelSeparator, channel.Namespace, channel.Name) + topicName := topicUtils.TopicName(utils.KafkaChannelSeparator, channel.Namespace, channel.Name) r.logger.Info("deleting topic on kafka cluster", zap.String("topic", topicName)) err := kafkaClusterAdmin.DeleteTopic(topicName) @@ -250,7 +245,7 @@ func (r *reconciler) getClusterChannelProvisioner() (*eventingv1alpha1.ClusterCh return clusterChannelProvisioner, nil } -func createKafkaAdminClient(config *controller.KafkaProvisionerConfig) (sarama.ClusterAdmin, error) { +func createKafkaAdminClient(config *utils.KafkaConfig) (sarama.ClusterAdmin, error) { saramaConf := sarama.NewConfig() saramaConf.Version = sarama.V1_1_0_0 saramaConf.ClientID = controllerAgentName diff --git a/contrib/kafka/pkg/controller/channel/reconcile_test.go b/contrib/kafka/pkg/controller/channel/reconcile_test.go index 315ee33d08e..d2e13fb4276 100644 --- a/contrib/kafka/pkg/controller/channel/reconcile_test.go +++ b/contrib/kafka/pkg/controller/channel/reconcile_test.go @@ -26,7 +26,7 @@ import ( "github.com/Shopify/sarama" "github.com/google/go-cmp/cmp" - "github.com/knative/eventing/contrib/kafka/pkg/controller" + . 
"github.com/knative/eventing/contrib/kafka/pkg/utils" eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" "github.com/knative/eventing/pkg/provisioners" util "github.com/knative/eventing/pkg/provisioners" @@ -535,8 +535,8 @@ func om(namespace, name string) metav1.ObjectMeta { } } -func getControllerConfig() *controller.KafkaProvisionerConfig { - return &controller.KafkaProvisionerConfig{ +func getControllerConfig() *KafkaConfig { + return &KafkaConfig{ Brokers: []string{"test-broker"}, } } diff --git a/contrib/kafka/pkg/controller/provider.go b/contrib/kafka/pkg/controller/provider.go index 0f6ca5631f2..2e0dec6a031 100644 --- a/contrib/kafka/pkg/controller/provider.go +++ b/contrib/kafka/pkg/controller/provider.go @@ -17,6 +17,7 @@ limitations under the License. package controller import ( + "github.com/knative/eventing/contrib/kafka/pkg/utils" "go.uber.org/zap" corev1 "k8s.io/api/core/v1" "k8s.io/client-go/tools/record" @@ -45,14 +46,14 @@ type reconciler struct { client client.Client recorder record.EventRecorder logger *zap.Logger - config *KafkaProvisionerConfig + config *utils.KafkaConfig } // Verify the struct implements reconcile.Reconciler var _ reconcile.Reconciler = &reconciler{} // ProvideController returns a Provisioner controller. -func ProvideController(mgr manager.Manager, config *KafkaProvisionerConfig, logger *zap.Logger) (controller.Controller, error) { +func ProvideController(mgr manager.Manager, config *utils.KafkaConfig, logger *zap.Logger) (controller.Controller, error) { // Setup a new controller to Reconcile Provisioners. 
c, err := controller.New(controllerAgentName, mgr, controller.Options{ Reconciler: &reconciler{ diff --git a/contrib/kafka/pkg/controller/reconcile_test.go b/contrib/kafka/pkg/controller/reconcile_test.go index 5589fd67f7a..e19c875c03e 100644 --- a/contrib/kafka/pkg/controller/reconcile_test.go +++ b/contrib/kafka/pkg/controller/reconcile_test.go @@ -19,6 +19,7 @@ package controller import ( "context" "fmt" + "github.com/knative/eventing/contrib/kafka/pkg/utils" "testing" eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" @@ -198,8 +199,8 @@ func om(namespace, name string) metav1.ObjectMeta { } } -func getControllerConfig() *KafkaProvisionerConfig { - return &KafkaProvisionerConfig{ +func getControllerConfig() *utils.KafkaConfig { + return &utils.KafkaConfig{ Brokers: []string{"test-broker"}, } } diff --git a/contrib/kafka/pkg/controller/types.go b/contrib/kafka/pkg/controller/types.go deleted file mode 100644 index f0384e28402..00000000000 --- a/contrib/kafka/pkg/controller/types.go +++ /dev/null @@ -1,8 +0,0 @@ -package controller - -import cluster "github.com/bsm/sarama-cluster" - -type KafkaProvisionerConfig struct { - Brokers []string - ConsumerMode cluster.ConsumerMode -} diff --git a/contrib/kafka/pkg/controller/util.go b/contrib/kafka/pkg/controller/util.go deleted file mode 100644 index 7b3a56d448e..00000000000 --- a/contrib/kafka/pkg/controller/util.go +++ /dev/null @@ -1,59 +0,0 @@ -package controller - -import ( - "fmt" - "log" - "strings" - - cluster "github.com/bsm/sarama-cluster" - - "github.com/knative/pkg/configmap" -) - -const ( - BrokerConfigMapKey = "bootstrap_servers" - ConsumerModeConfigMapKey = "consumer_mode" - ConsumerModePartitionConsumerValue = "partitions" - ConsumerModeMultiplexConsumerValue = "multiplex" - KafkaChannelSeparator = "." 
-) - -// GetProvisionerConfig returns the details of the associated ClusterChannelProvisioner object -func GetProvisionerConfig(path string) (*KafkaProvisionerConfig, error) { - configMap, err := configmap.Load(path) - if err != nil { - return nil, fmt.Errorf("error loading provisioner configuration: %s", err) - } - - if len(configMap) == 0 { - return nil, fmt.Errorf("missing provisioner configuration") - } - - config := &KafkaProvisionerConfig{} - - if brokers, ok := configMap[BrokerConfigMapKey]; ok { - bootstrapServers := strings.Split(brokers, ",") - for _, s := range bootstrapServers { - if len(s) == 0 { - return nil, fmt.Errorf("empty %s value in provisioner configuration", BrokerConfigMapKey) - } - } - config.Brokers = bootstrapServers - } else { - return nil, fmt.Errorf("missing key %s in provisioner configuration", BrokerConfigMapKey) - } - - config.ConsumerMode = cluster.ConsumerModeMultiplex - if mode, ok := configMap[ConsumerModeConfigMapKey]; ok { - switch strings.ToLower(mode) { - case ConsumerModeMultiplexConsumerValue: - config.ConsumerMode = cluster.ConsumerModeMultiplex - case ConsumerModePartitionConsumerValue: - config.ConsumerMode = cluster.ConsumerModePartitions - default: - log.Printf("consumer_mode: %q is invalid. 
Using default mode %q", mode, ConsumerModeMultiplexConsumerValue) - config.ConsumerMode = cluster.ConsumerModeMultiplex - } - } - return config, nil -} diff --git a/contrib/kafka/pkg/dispatcher/dispatcher.go b/contrib/kafka/pkg/dispatcher/dispatcher.go index 0ee4a219779..556ed4d55f0 100644 --- a/contrib/kafka/pkg/dispatcher/dispatcher.go +++ b/contrib/kafka/pkg/dispatcher/dispatcher.go @@ -18,11 +18,12 @@ package dispatcher import ( "errors" "fmt" + "github.com/knative/eventing/contrib/kafka/pkg/utils" "sync" "sync/atomic" "github.com/Shopify/sarama" - cluster "github.com/bsm/sarama-cluster" + "github.com/bsm/sarama-cluster" "github.com/google/go-cmp/cmp" "go.uber.org/zap" @@ -30,7 +31,6 @@ import ( eventingduck "github.com/knative/eventing/pkg/apis/duck/v1alpha1" "github.com/knative/eventing/pkg/provisioners" "github.com/knative/eventing/pkg/provisioners/multichannelfanout" - topicUtils "github.com/knative/eventing/pkg/provisioners/utils" ) type KafkaDispatcher struct { @@ -46,7 +46,18 @@ type KafkaDispatcher struct { kafkaConsumers map[provisioners.ChannelReference]map[subscription]KafkaConsumer kafkaCluster KafkaCluster - logger *zap.Logger + topicFunc TopicFunc + logger *zap.Logger +} + +type TopicFunc func(separator, namespace, name string) string + +type KafkaDispatcherArgs struct { + ClientID string + Brokers []string + ConsumerMode cluster.ConsumerMode + TopicFunc TopicFunc + Logger *zap.Logger } type KafkaConsumer interface { @@ -198,8 +209,7 @@ func (d *KafkaDispatcher) Start(stopCh <-chan struct{}) error { func (d *KafkaDispatcher) subscribe(channelRef provisioners.ChannelReference, sub subscription) error { d.logger.Info("Subscribing", zap.Any("channelRef", channelRef), zap.Any("subscription", sub)) - topicName := topicUtils.TopicName(controller.KafkaChannelSeparator, channelRef.Namespace, channelRef.Name) - + topicName := d.topicFunc(utils.KafkaChannelSeparator, channelRef.Namespace, channelRef.Name) group := fmt.Sprintf("%s.%s", controller.Name, 
sub.UID) consumer, err := d.kafkaCluster.NewConsumer(group, []string{topicName}) @@ -300,11 +310,11 @@ func (d *KafkaDispatcher) setHostToChannelMap(hcMap map[string]provisioners.Chan d.hostToChannelMap.Store(hcMap) } -func NewDispatcher(brokers []string, consumerMode cluster.ConsumerMode, logger *zap.Logger) (*KafkaDispatcher, error) { +func NewDispatcher(args *KafkaDispatcherArgs) (*KafkaDispatcher, error) { conf := sarama.NewConfig() conf.Version = sarama.V1_1_0_0 - conf.ClientID = controller.Name + "-dispatcher" - client, err := sarama.NewClient(brokers, conf) + conf.ClientID = args.ClientID + client, err := sarama.NewClient(args.Brokers, conf) if err != nil { return nil, fmt.Errorf("unable to create kafka client: %v", err) } @@ -315,20 +325,20 @@ func NewDispatcher(brokers []string, consumerMode cluster.ConsumerMode, logger * } dispatcher := &KafkaDispatcher{ - dispatcher: provisioners.NewMessageDispatcher(logger.Sugar()), + dispatcher: provisioners.NewMessageDispatcher(args.Logger.Sugar()), - kafkaCluster: &saramaCluster{kafkaBrokers: brokers, consumerMode: consumerMode}, + kafkaCluster: &saramaCluster{kafkaBrokers: args.Brokers, consumerMode: args.ConsumerMode}, kafkaConsumers: make(map[provisioners.ChannelReference]map[subscription]KafkaConsumer), kafkaAsyncProducer: producer, - logger: logger, + logger: args.Logger, } receiverFunc, err := provisioners.NewMessageReceiver( func(channel provisioners.ChannelReference, message *provisioners.Message) error { - dispatcher.kafkaAsyncProducer.Input() <- toKafkaMessage(channel, message) + dispatcher.kafkaAsyncProducer.Input() <- toKafkaMessage(channel, message, args.TopicFunc) return nil }, - logger.Sugar(), + args.Logger.Sugar(), provisioners.ResolveChannelFromHostHeader(provisioners.ResolveChannelFromHostFunc(dispatcher.getChannelReferenceFromHost))) if err != nil { return nil, err @@ -336,6 +346,7 @@ func NewDispatcher(brokers []string, consumerMode cluster.ConsumerMode, logger * dispatcher.receiver = 
receiverFunc dispatcher.setConfig(&multichannelfanout.Config{}) dispatcher.setHostToChannelMap(map[string]provisioners.ChannelReference{}) + dispatcher.topicFunc = args.TopicFunc return dispatcher, nil } @@ -360,9 +371,9 @@ func fromKafkaMessage(kafkaMessage *sarama.ConsumerMessage) *provisioners.Messag return &message } -func toKafkaMessage(channel provisioners.ChannelReference, message *provisioners.Message) *sarama.ProducerMessage { +func toKafkaMessage(channel provisioners.ChannelReference, message *provisioners.Message, topicFunc TopicFunc) *sarama.ProducerMessage { kafkaMessage := sarama.ProducerMessage{ - Topic: topicUtils.TopicName(controller.KafkaChannelSeparator, channel.Namespace, channel.Name), + Topic: topicFunc(utils.KafkaChannelSeparator, channel.Namespace, channel.Name), Value: sarama.ByteEncoder(message.Payload), } for h, v := range message.Headers { diff --git a/contrib/kafka/pkg/dispatcher/dispatcher_test.go b/contrib/kafka/pkg/dispatcher/dispatcher_test.go index 6d8055df186..277ece5dc28 100644 --- a/contrib/kafka/pkg/dispatcher/dispatcher_test.go +++ b/contrib/kafka/pkg/dispatcher/dispatcher_test.go @@ -3,6 +3,7 @@ package dispatcher import ( "errors" "fmt" + "github.com/knative/eventing/pkg/provisioners/utils" "io/ioutil" "net/http" "net/http/httptest" @@ -394,8 +395,8 @@ func TestDispatcher_UpdateConfig(t *testing.T) { d := &KafkaDispatcher{ kafkaCluster: &mockSaramaCluster{closed: true}, kafkaConsumers: make(map[provisioners.ChannelReference]map[subscription]KafkaConsumer), - - logger: zap.NewNop(), + topicFunc: utils.TopicName, + logger: zap.NewNop(), } d.setConfig(&multichannelfanout.Config{}) d.setHostToChannelMap(map[string]provisioners.ChannelReference{}) @@ -501,7 +502,7 @@ func TestToKafkaMessage(t *testing.T) { }, Value: sarama.ByteEncoder(data), } - got := toKafkaMessage(channelRef, msg) + got := toKafkaMessage(channelRef, msg, utils.TopicName) if diff := cmp.Diff(want, got, cmpopts.IgnoreUnexported(sarama.ProducerMessage{})); diff 
!= "" { t.Errorf("unexpected message (-want, +got) = %s", diff) } @@ -534,6 +535,7 @@ func TestSubscribe(t *testing.T) { kafkaConsumers: make(map[provisioners.ChannelReference]map[subscription]KafkaConsumer), dispatcher: provisioners.NewMessageDispatcher(zap.NewNop().Sugar()), logger: zap.NewNop(), + topicFunc: utils.TopicName, } testHandler := &dispatchTestHandler{ @@ -580,6 +582,7 @@ func TestPartitionConsumer(t *testing.T) { kafkaConsumers: make(map[provisioners.ChannelReference]map[subscription]KafkaConsumer), dispatcher: provisioners.NewMessageDispatcher(zap.NewNop().Sugar()), logger: zap.NewNop(), + topicFunc: utils.TopicName, } testHandler := &dispatchTestHandler{ t: t, @@ -627,6 +630,7 @@ func TestSubscribeError(t *testing.T) { d := &KafkaDispatcher{ kafkaCluster: sc, logger: zap.NewNop(), + topicFunc: utils.TopicName, } channelRef := provisioners.ChannelReference{ diff --git a/contrib/kafka/pkg/reconciler/controller/kafkachannel.go b/contrib/kafka/pkg/reconciler/controller/kafkachannel.go new file mode 100644 index 00000000000..48b9416b80d --- /dev/null +++ b/contrib/kafka/pkg/reconciler/controller/kafkachannel.go @@ -0,0 +1,462 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controller + +import ( + "context" + "encoding/json" + "fmt" + "github.com/knative/eventing/contrib/kafka/pkg/utils" + "github.com/knative/eventing/pkg/reconciler/names" + "reflect" + "time" + + "github.com/Shopify/sarama" + "github.com/knative/eventing/contrib/kafka/pkg/apis/messaging/v1alpha1" + clientset "github.com/knative/eventing/contrib/kafka/pkg/client/clientset/versioned" + messaginginformers "github.com/knative/eventing/contrib/kafka/pkg/client/informers/externalversions/messaging/v1alpha1" + listers "github.com/knative/eventing/contrib/kafka/pkg/client/listers/messaging/v1alpha1" + "github.com/knative/eventing/contrib/kafka/pkg/reconciler/controller/resources" + "github.com/knative/eventing/pkg/logging" + "github.com/knative/eventing/pkg/reconciler" + "github.com/knative/pkg/controller" + "go.uber.org/zap" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + apierrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" + appsv1informers "k8s.io/client-go/informers/apps/v1" + corev1informers "k8s.io/client-go/informers/core/v1" + appsv1listers "k8s.io/client-go/listers/apps/v1" + corev1listers "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/cache" +) + +const ( + // ReconcilerName is the name of the reconciler. + ReconcilerName = "KafkaChannels" + + // controllerAgentName is the string used by this controller to identify + // itself when creating events. + controllerAgentName = "kafka-ch-controller" + + finalizerName = controllerAgentName + + // Name of the corev1.Events emitted from the reconciliation process. + channelReconciled = "ChannelReconciled" + channelReconcileFailed = "ChannelReconcileFailed" + channelUpdateStatusFailed = "ChannelUpdateStatusFailed" +) + +// Reconciler reconciles Kafka Channels. 
+type Reconciler struct { + *reconciler.Base + + dispatcherNamespace string + dispatcherDeploymentName string + dispatcherServiceName string + + kafkaConfig *utils.KafkaConfig + + // Using a shared kafkaClusterAdmin does not work currently because of an issue with + // Shopify/sarama, see https://github.com/Shopify/sarama/issues/1162. + kafkaClusterAdmin sarama.ClusterAdmin + + eventingClientSet clientset.Interface + kafkachannelLister listers.KafkaChannelLister + kafkachannelInformer cache.SharedIndexInformer + deploymentLister appsv1listers.DeploymentLister + serviceLister corev1listers.ServiceLister + endpointsLister corev1listers.EndpointsLister + impl *controller.Impl +} + +var ( + deploymentGVK = appsv1.SchemeGroupVersion.WithKind("Deployment") + serviceGVK = corev1.SchemeGroupVersion.WithKind("Service") +) + +// Check that our Reconciler implements controller.Reconciler. +var _ controller.Reconciler = (*Reconciler)(nil) + +// Check that our Reconciler implements cache.ResourceEventHandler +var _ cache.ResourceEventHandler = (*Reconciler)(nil) + +// NewController initializes the controller and is called by the generated code. +// Registers event handlers to enqueue events. 
+func NewController( + opt reconciler.Options, + eventingClientSet clientset.Interface, + kafkaConfig *utils.KafkaConfig, + dispatcherNamespace string, + dispatcherDeploymentName string, + dispatcherServiceName string, + kafkachannelInformer messaginginformers.KafkaChannelInformer, + deploymentInformer appsv1informers.DeploymentInformer, + serviceInformer corev1informers.ServiceInformer, + endpointsInformer corev1informers.EndpointsInformer, +) *controller.Impl { + + r := &Reconciler{ + Base: reconciler.NewBase(opt, controllerAgentName), + dispatcherNamespace: dispatcherNamespace, + dispatcherDeploymentName: dispatcherDeploymentName, + dispatcherServiceName: dispatcherServiceName, + kafkaConfig: kafkaConfig, + eventingClientSet: eventingClientSet, + kafkachannelLister: kafkachannelInformer.Lister(), + kafkachannelInformer: kafkachannelInformer.Informer(), + deploymentLister: deploymentInformer.Lister(), + serviceLister: serviceInformer.Lister(), + endpointsLister: endpointsInformer.Lister(), + } + r.impl = controller.NewImpl(r, r.Logger, ReconcilerName) + + r.Logger.Info("Setting up event handlers") + kafkachannelInformer.Informer().AddEventHandler(controller.HandleAll(r.impl.Enqueue)) + + // Set up watches for dispatcher resources we care about, since any changes to these + // resources will affect our Channels. So, set up a watch here, that will cause + // a global Resync for all the channels to take stock of their health when these change. 
+ deploymentInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{ + FilterFunc: controller.FilterWithNameAndNamespace(dispatcherNamespace, dispatcherDeploymentName), + Handler: r, + }) + serviceInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{ + FilterFunc: controller.FilterWithNameAndNamespace(dispatcherNamespace, dispatcherServiceName), + Handler: r, + }) + endpointsInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{ + FilterFunc: controller.FilterWithNameAndNamespace(dispatcherNamespace, dispatcherServiceName), + Handler: r, + }) + return r.impl +} + +// cache.ResourceEventHandler implementation. +// These 3 functions just cause a Global Resync of the channels, because any changes here +// should be reflected onto the channels. +func (r *Reconciler) OnAdd(obj interface{}) { + r.impl.GlobalResync(r.kafkachannelInformer) +} + +func (r *Reconciler) OnUpdate(old, new interface{}) { + r.impl.GlobalResync(r.kafkachannelInformer) +} + +func (r *Reconciler) OnDelete(obj interface{}) { + r.impl.GlobalResync(r.kafkachannelInformer) +} + +// Reconcile compares the actual state with the desired, and attempts to +// converge the two. It then updates the Status block of the KafkaChannel resource +// with the current status of the resource. +func (r *Reconciler) Reconcile(ctx context.Context, key string) error { + // Convert the namespace/name string into a distinct namespace and name. + namespace, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + logging.FromContext(ctx).Error("invalid resource key") + return nil + } + + // Get the KafkaChannel resource with this namespace/name. + original, err := r.kafkachannelLister.KafkaChannels(namespace).Get(name) + if apierrs.IsNotFound(err) { + // The resource may no longer exist, in which case we stop processing. 
+ logging.FromContext(ctx).Error("KafkaChannel key in work queue no longer exists") + return nil + } else if err != nil { + return err + } + + // Don't modify the informers copy. + channel := original.DeepCopy() + + // Reconcile this copy of the KafkaChannel and then write back any status updates regardless of + // whether the reconcile error out. + reconcileErr := r.reconcile(ctx, channel) + if reconcileErr != nil { + logging.FromContext(ctx).Error("Error reconciling KafkaChannel", zap.Error(reconcileErr)) + r.Recorder.Eventf(channel, corev1.EventTypeWarning, channelReconcileFailed, "KafkaChannel reconciliation failed: %v", reconcileErr) + } else { + logging.FromContext(ctx).Debug("KafkaChannel reconciled") + r.Recorder.Event(channel, corev1.EventTypeNormal, channelReconciled, "KafkaChannel reconciled") + } + + if _, updateStatusErr := r.updateStatus(ctx, channel); updateStatusErr != nil { + logging.FromContext(ctx).Error("Failed to update KafkaChannel status", zap.Error(updateStatusErr)) + r.Recorder.Eventf(channel, corev1.EventTypeWarning, channelUpdateStatusFailed, "Failed to update KafkaChannel's status: %v", updateStatusErr) + return updateStatusErr + } + + // Requeue if the resource is not ready + return reconcileErr +} + +func (r *Reconciler) reconcile(ctx context.Context, kc *v1alpha1.KafkaChannel) error { + kc.Status.InitializeConditions() + + logger := logging.FromContext(ctx) + // Verify channel is valid. + if err := kc.Validate(ctx); err != nil { + logger.Error("Invalid kafka channel", zap.String("channel", kc.Name), zap.Error(err)) + return err + } + + kafkaClusterAdmin, err := r.createClient(ctx, kc) + if err != nil { + logger.Error("Unable to build kafka admin client", zap.String("channel", kc.Name), zap.Error(err)) + return err + } + + // See if the channel has been deleted. 
+ if kc.DeletionTimestamp != nil { + if err := r.deleteTopic(ctx, kc, kafkaClusterAdmin); err != nil { + return err + } + removeFinalizer(kc) + _, err := r.eventingClientSet.MessagingV1alpha1().KafkaChannels(kc.Namespace).Update(kc) + return err + } + + // If we are adding the finalizer for the first time, then ensure that finalizer is persisted + // before manipulating Kafka. + if err := r.ensureFinalizer(kc); err != nil { + return err + } + + // We reconcile the status of the Channel by looking at: + // 1. Kafka topic used by the channel. + // 2. Dispatcher Deployment for it's readiness. + // 3. Dispatcher k8s Service for it's existence. + // 4. Dispatcher endpoints to ensure that there's something backing the Service. + // 5. K8s service representing the channel that will use ExternalName to point to the Dispatcher k8s service. + + if err := r.createTopic(ctx, kc, kafkaClusterAdmin); err != nil { + kc.Status.MarkTopicFailed("TopicCreateFailed", "error while creating topic: %s", err) + return err + } + kc.Status.MarkTopicTrue() + + // Get the Dispatcher Deployment and propagate the status to the Channel + d, err := r.deploymentLister.Deployments(r.dispatcherNamespace).Get(r.dispatcherDeploymentName) + if err != nil { + if apierrs.IsNotFound(err) { + kc.Status.MarkDispatcherFailed("DispatcherDeploymentDoesNotExist", "Dispatcher Deployment does not exist") + } else { + logger.Error("Unable to get the dispatcher Deployment", zap.Error(err)) + kc.Status.MarkDispatcherFailed("DispatcherDeploymentGetFailed", "Failed to get dispatcher Deployment") + } + return err + } + kc.Status.PropagateDispatcherStatus(&d.Status) + + // Get the Dispatcher Service and propagate the status to the Channel in case it does not exist. + // We don't do anything with the service because it's status contains nothing useful, so just do + // an existence check. Then below we check the endpoints targeting it. 
+ _, err = r.serviceLister.Services(r.dispatcherNamespace).Get(r.dispatcherServiceName) + if err != nil { + if apierrs.IsNotFound(err) { + kc.Status.MarkServiceFailed("DispatcherServiceDoesNotExist", "Dispatcher Service does not exist") + } else { + logger.Error("Unable to get the dispatcher service", zap.Error(err)) + kc.Status.MarkServiceFailed("DispatcherServiceGetFailed", "Failed to get dispatcher service") + } + return err + } + kc.Status.MarkServiceTrue() + + // Get the Dispatcher Service Endpoints and propagate the status to the Channel + // endpoints has the same name as the service, so not a bug. + e, err := r.endpointsLister.Endpoints(r.dispatcherNamespace).Get(r.dispatcherServiceName) + if err != nil { + if apierrs.IsNotFound(err) { + kc.Status.MarkEndpointsFailed("DispatcherEndpointsDoesNotExist", "Dispatcher Endpoints does not exist") + } else { + logger.Error("Unable to get the dispatcher endpoints", zap.Error(err)) + kc.Status.MarkEndpointsFailed("DispatcherEndpointsGetFailed", "Failed to get dispatcher endpoints") + } + return err + } + + if len(e.Subsets) == 0 { + logger.Error("No endpoints found for Dispatcher service", zap.Error(err)) + kc.Status.MarkEndpointsFailed("DispatcherEndpointsNotReady", "There are no endpoints ready for Dispatcher service") + return fmt.Errorf("there are no endpoints ready for Dispatcher service %s", r.dispatcherServiceName) + } + kc.Status.MarkEndpointsTrue() + + // Reconcile the k8s service representing the actual Channel. 
It points to the Dispatcher service via ExternalName + svc, err := r.reconcileChannelService(ctx, kc) + if err != nil { + kc.Status.MarkChannelServiceFailed("ChannelServiceFailed", fmt.Sprintf("Channel Service failed: %s", err)) + return err + } + kc.Status.MarkChannelServiceTrue() + kc.Status.SetAddress(names.ServiceHostName(svc.Name, svc.Namespace)) + + // close the connection + err = kafkaClusterAdmin.Close() + if err != nil { + logger.Error("Error closing the connection", zap.Error(err)) + return err + } + + // Ok, so now the Dispatcher Deployment & Service have been created, we're golden since the + // dispatcher watches the Channel and where it needs to dispatch events to. + return nil +} + +func (r *Reconciler) reconcileChannelService(ctx context.Context, channel *v1alpha1.KafkaChannel) (*corev1.Service, error) { + logger := logging.FromContext(ctx) + // Get the Service and propagate the status to the Channel in case it does not exist. + // We don't do anything with the service because it's status contains nothing useful, so just do + // an existence check. Then below we check the endpoints targeting it. + // We may change this name later, so we have to ensure we use proper addressable when resolving these. 
+ svc, err := r.serviceLister.Services(channel.Namespace).Get(resources.MakeChannelServiceName(channel.Name)) + if err != nil { + if apierrs.IsNotFound(err) { + svc, err = resources.MakeK8sService(channel, resources.ExternalService(r.dispatcherNamespace, r.dispatcherServiceName)) + if err != nil { + logger.Error("Failed to create the channel service object", zap.Error(err)) + return nil, err + } + svc, err = r.KubeClientSet.CoreV1().Services(channel.Namespace).Create(svc) + if err != nil { + logger.Error("Failed to create the channel service", zap.Error(err)) + return nil, err + } + return svc, nil + } else { + logger.Error("Unable to get the channel service", zap.Error(err)) + } + return nil, err + } + // Check to make sure that the KafkaChannel owns this service and if not, complain. + if !metav1.IsControlledBy(svc, channel) { + return nil, fmt.Errorf("kafkachannel: %s/%s does not own Service: %q", channel.Namespace, channel.Name, svc.Name) + } + return svc, nil +} + +func (r *Reconciler) updateStatus(ctx context.Context, desired *v1alpha1.KafkaChannel) (*v1alpha1.KafkaChannel, error) { + kc, err := r.kafkachannelLister.KafkaChannels(desired.Namespace).Get(desired.Name) + if err != nil { + return nil, err + } + + if reflect.DeepEqual(kc.Status, desired.Status) { + return kc, nil + } + + becomesReady := desired.Status.IsReady() && !kc.Status.IsReady() + + // Don't modify the informers copy. 
+ existing := kc.DeepCopy() + existing.Status = desired.Status + + new, err := r.eventingClientSet.MessagingV1alpha1().KafkaChannels(desired.Namespace).UpdateStatus(existing) + if err == nil && becomesReady { + duration := time.Since(new.ObjectMeta.CreationTimestamp.Time) + r.Logger.Infof("KafkaChannel %q became ready after %v", kc.Name, duration) + if err := r.StatsReporter.ReportReady("Channel", kc.Namespace, kc.Name, duration); err != nil { + r.Logger.Infof("Failed to record ready for KafkaChannel %q: %v", kc.Name, err) + } + } + return new, err +} + +func (r *Reconciler) createClient(ctx context.Context, kc *v1alpha1.KafkaChannel) (sarama.ClusterAdmin, error) { + // We don't currently initialize r.kafkaClusterAdmin, hence we end up creating the cluster admin client every time. + // This is because of an issue with Shopify/sarama. See https://github.com/Shopify/sarama/issues/1162. + // Once the issue is fixed we should use a shared cluster admin client. Also, r.kafkaClusterAdmin is currently + // used to pass a fake admin client in the tests. 
+ kafkaClusterAdmin := r.kafkaClusterAdmin + if kafkaClusterAdmin == nil { + var err error + kafkaClusterAdmin, err = resources.MakeClient(controllerAgentName, r.kafkaConfig.Brokers) + if err != nil { + return nil, err + } + } + return kafkaClusterAdmin, nil +} + +func (r *Reconciler) createTopic(ctx context.Context, channel *v1alpha1.KafkaChannel, kafkaClusterAdmin sarama.ClusterAdmin) error { + logger := logging.FromContext(ctx) + + topicName := utils.TopicName(utils.KafkaChannelSeparator, channel.Namespace, channel.Name) + logger.Info("Creating topic on Kafka cluster", zap.String("topic", topicName)) + err := kafkaClusterAdmin.CreateTopic(topicName, &sarama.TopicDetail{ + ReplicationFactor: channel.Spec.ReplicationFactor, + NumPartitions: channel.Spec.NumPartitions, + }, false) + if err == sarama.ErrTopicAlreadyExists { + return nil + } else if err != nil { + logger.Error("Error creating topic", zap.String("topic", topicName), zap.Error(err)) + } else { + logger.Info("Successfully created topic", zap.String("topic", topicName)) + } + return err +} + +func (r *Reconciler) deleteTopic(ctx context.Context, channel *v1alpha1.KafkaChannel, kafkaClusterAdmin sarama.ClusterAdmin) error { + logger := logging.FromContext(ctx) + + topicName := utils.TopicName(utils.KafkaChannelSeparator, channel.Namespace, channel.Name) + logger.Info("Deleting topic on Kafka Cluster", zap.String("topic", topicName)) + err := kafkaClusterAdmin.DeleteTopic(topicName) + if err == sarama.ErrUnknownTopicOrPartition { + return nil + } else if err != nil { + logger.Error("Error deleting topic", zap.String("topic", topicName), zap.Error(err)) + } else { + logger.Info("Successfully deleted topic", zap.String("topic", topicName)) + } + return err +} + +func (r *Reconciler) ensureFinalizer(channel *v1alpha1.KafkaChannel) error { + finalizers := sets.NewString(channel.Finalizers...) 
+ if finalizers.Has(finalizerName) { + return nil + } + + mergePatch := map[string]interface{}{ + "metadata": map[string]interface{}{ + "finalizers": append(channel.Finalizers, finalizerName), + "resourceVersion": channel.ResourceVersion, + }, + } + + patch, err := json.Marshal(mergePatch) + if err != nil { + return err + } + + _, err = r.eventingClientSet.MessagingV1alpha1().KafkaChannels(channel.Namespace).Patch(channel.Name, types.MergePatchType, patch) + return err +} + +func removeFinalizer(channel *v1alpha1.KafkaChannel) { + finalizers := sets.NewString(channel.Finalizers...) + finalizers.Delete(finalizerName) + channel.Finalizers = finalizers.List() +} diff --git a/contrib/kafka/pkg/reconciler/controller/resources/client.go b/contrib/kafka/pkg/reconciler/controller/resources/client.go new file mode 100644 index 00000000000..d2af5852ecd --- /dev/null +++ b/contrib/kafka/pkg/reconciler/controller/resources/client.go @@ -0,0 +1,28 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package resources + +import ( + "github.com/Shopify/sarama" +) + +func MakeClient(clientID string, bootstrapServers []string) (sarama.ClusterAdmin, error) { + saramaConf := sarama.NewConfig() + saramaConf.Version = sarama.V1_1_0_0 + saramaConf.ClientID = clientID + return sarama.NewClusterAdmin(bootstrapServers, saramaConf) +} diff --git a/contrib/kafka/pkg/reconciler/controller/resources/service.go b/contrib/kafka/pkg/reconciler/controller/resources/service.go new file mode 100644 index 00000000000..58dad0589be --- /dev/null +++ b/contrib/kafka/pkg/reconciler/controller/resources/service.go @@ -0,0 +1,96 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resources + +import ( + "fmt" + + "github.com/knative/eventing/contrib/kafka/pkg/apis/messaging/v1alpha1" + "github.com/knative/eventing/pkg/utils" + "github.com/knative/pkg/kmeta" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + portName = "http" + portNumber = 80 + MessagingRoleLabel = "messaging.knative.dev/role" + MessagingRole = "kafka-channel" +) + +// ServiceOption can be used to optionally modify the K8s service in MakeK8sService. 
+type ServiceOption func(*corev1.Service) error + +func MakeExternalServiceAddress(namespace, service string) string { + return fmt.Sprintf("%s.%s.svc.%s", service, namespace, utils.GetClusterDomainName()) +} + +func MakeChannelServiceName(name string) string { + return fmt.Sprintf("%s-kn-channel", name) +} + +// ExternalService is a functional option for MakeK8sService to create a K8s service of type ExternalName +// pointing to the specified service in a namespace. +func ExternalService(namespace, service string) ServiceOption { + return func(svc *corev1.Service) error { + // TODO this overrides the current serviceSpec. Is this correct? + svc.Spec = corev1.ServiceSpec{ + Type: corev1.ServiceTypeExternalName, + ExternalName: MakeExternalServiceAddress(namespace, service), + } + return nil + } +} + +// MakeK8sService creates a new K8s Service for a Channel resource. It also sets the appropriate +// OwnerReferences on the resource so handleObject can discover the Channel resource that 'owns' it. +// As well as being garbage collected when the Channel is deleted. 
+func MakeK8sService(kc *v1alpha1.KafkaChannel, opts ...ServiceOption) (*corev1.Service, error) { + // Add annotations + svc := &corev1.Service{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Service", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: MakeChannelServiceName(kc.ObjectMeta.Name), + Namespace: kc.Namespace, + Labels: map[string]string{ + MessagingRoleLabel: MessagingRole, + }, + OwnerReferences: []metav1.OwnerReference{ + *kmeta.NewControllerRef(kc), + }, + }, + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{ + { + Name: portName, + Protocol: corev1.ProtocolTCP, + Port: portNumber, + }, + }, + }, + } + for _, opt := range opts { + if err := opt(svc); err != nil { + return nil, err + } + } + return svc, nil +} diff --git a/contrib/kafka/pkg/reconciler/controller/resources/service_test.go b/contrib/kafka/pkg/reconciler/controller/resources/service_test.go new file mode 100644 index 00000000000..66dac7ff372 --- /dev/null +++ b/contrib/kafka/pkg/reconciler/controller/resources/service_test.go @@ -0,0 +1,143 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package resources + +import ( + "errors" + "fmt" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/knative/eventing/contrib/kafka/pkg/apis/messaging/v1alpha1" + "github.com/knative/pkg/kmeta" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + serviceName = "my-test-service" + kcName = "my-test-kc" + testNS = "my-test-ns" + dispatcherNS = "dispatcher-namespace" + dispatcherName = "dispatcher-name" +) + +func TestMakeExternalServiceAddress(t *testing.T) { + if want, got := "my-test-service.my-test-ns.svc.cluster.local", MakeExternalServiceAddress(testNS, serviceName); want != got { + t.Errorf("Want: %q got %q", want, got) + } +} + +func TestMakeChannelServiceAddress(t *testing.T) { + if want, got := "my-test-kc-kn-channel", MakeChannelServiceName(kcName); want != got { + t.Errorf("Want: %q got %q", want, got) + } +} + +func TestMakeService(t *testing.T) { + imc := &v1alpha1.KafkaChannel{ + ObjectMeta: metav1.ObjectMeta{ + Name: kcName, + Namespace: testNS, + }, + } + want := &corev1.Service{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Service", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-kn-channel", kcName), + Namespace: testNS, + Labels: map[string]string{ + MessagingRoleLabel: MessagingRole, + }, + OwnerReferences: []metav1.OwnerReference{ + *kmeta.NewControllerRef(imc), + }, + }, + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{ + { + Name: portName, + Protocol: corev1.ProtocolTCP, + Port: portNumber, + }, + }, + }, + } + + got, err := MakeK8sService(imc) + if err != nil { + t.Fatalf("Failed to create new service: %s", err) + } + + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("unexpected condition (-want, +got) = %v", diff) + } +} + +func TestMakeServiceWithExternal(t *testing.T) { + imc := &v1alpha1.KafkaChannel{ + ObjectMeta: metav1.ObjectMeta{ + Name: kcName, + Namespace: testNS, + }, + } + want := &corev1.Service{ + TypeMeta: metav1.TypeMeta{ + 
APIVersion: "v1",
+			Kind:       "Service",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      fmt.Sprintf("%s-kn-channel", kcName),
+			Namespace: testNS,
+			Labels: map[string]string{
+				MessagingRoleLabel: MessagingRole,
+			},
+			OwnerReferences: []metav1.OwnerReference{
+				*kmeta.NewControllerRef(imc),
+			},
+		},
+		Spec: corev1.ServiceSpec{
+			Type:         corev1.ServiceTypeExternalName,
+			ExternalName: "dispatcher-name.dispatcher-namespace.svc.cluster.local",
+		},
+	}
+
+	got, err := MakeK8sService(imc, ExternalService(dispatcherNS, dispatcherName))
+	if err != nil {
+		t.Fatalf("Failed to create new service: %s", err)
+	}
+
+	if diff := cmp.Diff(want, got); diff != "" {
+		t.Errorf("unexpected condition (-want, +got) = %v", diff)
+	}
+}
+
+func TestMakeServiceWithFailingOption(t *testing.T) {
+	imc := &v1alpha1.KafkaChannel{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      kcName,
+			Namespace: testNS,
+		},
+	}
+	_, err := MakeK8sService(imc, func(svc *corev1.Service) error { return errors.New("test-induced failure") })
+	if err == nil {
+		t.Fatalf("Expected error from new service but got none")
+	}
+}
diff --git a/contrib/kafka/pkg/reconciler/controller/resources/topic.go b/contrib/kafka/pkg/reconciler/controller/resources/topic.go
new file mode 100644
index 00000000000..b257b8a38fe
--- /dev/null
+++ b/contrib/kafka/pkg/reconciler/controller/resources/topic.go
@@ -0,0 +1,30 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package resources + +import ( + "fmt" + "github.com/knative/eventing/contrib/kafka/pkg/apis/messaging/v1alpha1" +) + +const ( + kafkaChannelPrefix = "knative-messaging-kafka-channel" +) + +func MakeTopicName(channel *v1alpha1.KafkaChannel) string { + return fmt.Sprintf("%s.%s.%s", kafkaChannelPrefix, channel.Namespace, channel.Name) +} diff --git a/contrib/kafka/pkg/reconciler/dispatcher/kafkachannel.go b/contrib/kafka/pkg/reconciler/dispatcher/kafkachannel.go new file mode 100644 index 00000000000..9c2a54a4668 --- /dev/null +++ b/contrib/kafka/pkg/reconciler/dispatcher/kafkachannel.go @@ -0,0 +1,141 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controller + +import ( + "context" + "github.com/knative/eventing/contrib/kafka/pkg/apis/messaging/v1alpha1" + clientset "github.com/knative/eventing/contrib/kafka/pkg/client/clientset/versioned" + messaginginformers "github.com/knative/eventing/contrib/kafka/pkg/client/informers/externalversions/messaging/v1alpha1" + listers "github.com/knative/eventing/contrib/kafka/pkg/client/listers/messaging/v1alpha1" + "github.com/knative/eventing/contrib/kafka/pkg/dispatcher" + "github.com/knative/eventing/pkg/logging" + "github.com/knative/eventing/pkg/provisioners/fanout" + "github.com/knative/eventing/pkg/provisioners/multichannelfanout" + "github.com/knative/eventing/pkg/reconciler" + "github.com/knative/pkg/controller" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +const ( + // ReconcilerName is the name of the reconciler. + ReconcilerName = "KafkaChannels" + + // controllerAgentName is the string used by this controller to identify + // itself when creating events. + controllerAgentName = "kafka-ch-dispatcher" +) + +// Reconciler reconciles Kafka Channels. +type Reconciler struct { + *reconciler.Base + + kafkaDispatcher *dispatcher.KafkaDispatcher + + eventingClientSet clientset.Interface + kafkachannelLister listers.KafkaChannelLister + kafkachannelInformer cache.SharedIndexInformer + impl *controller.Impl +} + +// Check that our Reconciler implements controller.Reconciler. +var _ controller.Reconciler = (*Reconciler)(nil) + +// NewController initializes the controller and is called by the generated code. +// Registers event handlers to enqueue events. 
+func NewController( + opt reconciler.Options, + eventingClientSet clientset.Interface, + kafkaDispatcher *dispatcher.KafkaDispatcher, + kafkachannelInformer messaginginformers.KafkaChannelInformer, +) *controller.Impl { + + r := &Reconciler{ + Base: reconciler.NewBase(opt, controllerAgentName), + kafkaDispatcher: kafkaDispatcher, + eventingClientSet: eventingClientSet, + kafkachannelLister: kafkachannelInformer.Lister(), + kafkachannelInformer: kafkachannelInformer.Informer(), + } + r.impl = controller.NewImpl(r, r.Logger, ReconcilerName) + + r.Logger.Info("Setting up event handlers") + + // Watch for kafka channels. + kafkachannelInformer.Informer().AddEventHandler(controller.HandleAll(r.impl.Enqueue)) + + return r.impl +} + +func (r *Reconciler) Reconcile(ctx context.Context, key string) error { + // Convert the namespace/name string into a distinct namespace and name. + _, _, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + logging.FromContext(ctx).Error("invalid resource key") + return nil + } + + // This is a special Reconciler that does the following: + // 1. Lists the kafka channels. + // 2. Creates a multi-channel-fanout-config. + // 3. Calls the kafka dispatcher's updateConfig func with the new multi-channel-fanout-config. + + channels, err := r.kafkachannelLister.List(labels.Everything()) + if err != nil { + logging.FromContext(ctx).Error("Error listing kafka channels") + return err + } + + kafkaChannels := make([]*v1alpha1.KafkaChannel, 0) + for _, channel := range channels { + if channel.Status.IsReady() { + kafkaChannels = append(kafkaChannels, channel) + } + } + + config := r.newConfigFromKafkaChannels(kafkaChannels) + err = r.kafkaDispatcher.UpdateConfig(config) + if err != nil { + logging.FromContext(ctx).Error("Error updating kafka dispatcher config") + return err + } + + return nil +} + +// newConfigFromKafkaChannels creates a new Config from the list of kafka channels. 
+func (r *Reconciler) newConfigFromKafkaChannels(channels []*v1alpha1.KafkaChannel) *multichannelfanout.Config { + cc := make([]multichannelfanout.ChannelConfig, 0) + for _, c := range channels { + channelConfig := multichannelfanout.ChannelConfig{ + Namespace: c.Namespace, + Name: c.Name, + HostName: c.Status.Address.Hostname, + } + if c.Spec.Subscribable != nil { + channelConfig.FanoutConfig = fanout.Config{ + AsyncHandler: true, + Subscriptions: c.Spec.Subscribable.Subscribers, + } + } + cc = append(cc, channelConfig) + } + return &multichannelfanout.Config{ + ChannelConfigs: cc, + } +} diff --git a/contrib/kafka/pkg/utils/util.go b/contrib/kafka/pkg/utils/util.go new file mode 100644 index 00000000000..63e379a54f6 --- /dev/null +++ b/contrib/kafka/pkg/utils/util.go @@ -0,0 +1,93 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package utils + +import ( + "fmt" + "log" + "strings" + + "github.com/bsm/sarama-cluster" + + "github.com/knative/pkg/configmap" +) + +const ( + BrokerConfigMapKey = "bootstrap_servers" + ConsumerModeConfigMapKey = "consumer_mode" + ConsumerModePartitionConsumerValue = "partitions" + ConsumerModeMultiplexConsumerValue = "multiplex" + KafkaChannelSeparator = "." 
+ + // DefaultNumPartitions defines the default number of partitions + DefaultNumPartitions = 1 + + // DefaultReplicationFactor defines the default number of replications + DefaultReplicationFactor = 1 + + knativeKafkaTopicPrefix = "knative-messaging-kafka" +) + +type KafkaConfig struct { + Brokers []string + ConsumerMode cluster.ConsumerMode +} + +// GetKafkaConfig returns the details of the Kafka cluster. +func GetKafkaConfig(path string) (*KafkaConfig, error) { + configMap, err := configmap.Load(path) + if err != nil { + return nil, fmt.Errorf("error loading configuration: %s", err) + } + + if len(configMap) == 0 { + return nil, fmt.Errorf("missing configuration") + } + + config := &KafkaConfig{} + + if brokers, ok := configMap[BrokerConfigMapKey]; ok { + bootstrapServers := strings.Split(brokers, ",") + for _, s := range bootstrapServers { + if len(s) == 0 { + return nil, fmt.Errorf("empty %s value in configuration", BrokerConfigMapKey) + } + } + config.Brokers = bootstrapServers + } else { + return nil, fmt.Errorf("missing key %s in configuration", BrokerConfigMapKey) + } + + config.ConsumerMode = cluster.ConsumerModeMultiplex + if mode, ok := configMap[ConsumerModeConfigMapKey]; ok { + switch strings.ToLower(mode) { + case ConsumerModeMultiplexConsumerValue: + config.ConsumerMode = cluster.ConsumerModeMultiplex + case ConsumerModePartitionConsumerValue: + config.ConsumerMode = cluster.ConsumerModePartitions + default: + log.Printf("consumer_mode: %q is invalid. 
Using default mode %q", mode, ConsumerModeMultiplexConsumerValue) + config.ConsumerMode = cluster.ConsumerModeMultiplex + } + } + return config, nil +} + +func TopicName(separator, namespace, name string) string { + topic := []string{knativeKafkaTopicPrefix, namespace, name} + return strings.Join(topic, separator) +} diff --git a/contrib/kafka/pkg/controller/util_test.go b/contrib/kafka/pkg/utils/util_test.go similarity index 72% rename from contrib/kafka/pkg/controller/util_test.go rename to contrib/kafka/pkg/utils/util_test.go index 526f183bd86..f599351f2bd 100644 --- a/contrib/kafka/pkg/controller/util_test.go +++ b/contrib/kafka/pkg/utils/util_test.go @@ -1,4 +1,20 @@ -package controller +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package utils import ( "io/ioutil" @@ -12,53 +28,53 @@ import ( _ "github.com/knative/pkg/system/testing" ) -func TestGetProvisionerConfigBrokers(t *testing.T) { +func TestGetKafkaConfig(t *testing.T) { testCases := []struct { name string data map[string]string path string getError string - expected *KafkaProvisionerConfig + expected *KafkaConfig }{ { name: "invalid config path", path: "/tmp/does_not_exist", - getError: "error loading provisioner configuration: lstat /tmp/does_not_exist: no such file or directory", + getError: "error loading configuration: lstat /tmp/does_not_exist: no such file or directory", }, { name: "configmap with no data", data: map[string]string{}, - getError: "missing provisioner configuration", + getError: "missing configuration", }, { name: "configmap with no bootstrap_servers key", data: map[string]string{"key": "value"}, - getError: "missing key bootstrap_servers in provisioner configuration", + getError: "missing key bootstrap_servers in configuration", }, { name: "configmap with empty bootstrap_servers value", data: map[string]string{"bootstrap_servers": ""}, - getError: "empty bootstrap_servers value in provisioner configuration", + getError: "empty bootstrap_servers value in configuration", }, { name: "single bootstrap_servers", data: map[string]string{"bootstrap_servers": "kafkabroker.kafka:9092"}, - expected: &KafkaProvisionerConfig{ + expected: &KafkaConfig{ Brokers: []string{"kafkabroker.kafka:9092"}, }, }, { name: "multiple bootstrap_servers", data: map[string]string{"bootstrap_servers": "kafkabroker1.kafka:9092,kafkabroker2.kafka:9092"}, - expected: &KafkaProvisionerConfig{ + expected: &KafkaConfig{ Brokers: []string{"kafkabroker1.kafka:9092", "kafkabroker2.kafka:9092"}, }, }, { name: "partition consumer", data: map[string]string{"bootstrap_servers": "kafkabroker.kafka:9092", "consumer_mode": "partitions"}, - expected: &KafkaProvisionerConfig{ + expected: &KafkaConfig{ Brokers: []string{"kafkabroker.kafka:9092"}, 
ConsumerMode: cluster.ConsumerModePartitions, }, @@ -66,7 +82,7 @@ func TestGetProvisionerConfigBrokers(t *testing.T) { { name: "default multiplex", data: map[string]string{"bootstrap_servers": "kafkabroker.kafka:9092", "consumer_mode": "multiplex"}, - expected: &KafkaProvisionerConfig{ + expected: &KafkaConfig{ Brokers: []string{"kafkabroker.kafka:9092"}, ConsumerMode: cluster.ConsumerModeMultiplex, }, @@ -74,7 +90,7 @@ func TestGetProvisionerConfigBrokers(t *testing.T) { { name: "default multiplex from invalid consumer_mode", data: map[string]string{"bootstrap_servers": "kafkabroker.kafka:9092", "consumer_mode": "foo"}, - expected: &KafkaProvisionerConfig{ + expected: &KafkaConfig{ Brokers: []string{"kafkabroker.kafka:9092"}, ConsumerMode: cluster.ConsumerModeMultiplex, }, @@ -103,7 +119,7 @@ func TestGetProvisionerConfigBrokers(t *testing.T) { tc.path = dir } - got, err := GetProvisionerConfig(tc.path) + got, err := GetKafkaConfig(tc.path) if tc.getError != "" { if err == nil { diff --git a/hack/update-codegen.sh b/hack/update-codegen.sh index b7e3fc9e7ab..d638b309d80 100755 --- a/hack/update-codegen.sh +++ b/hack/update-codegen.sh @@ -37,5 +37,18 @@ ${CODEGEN_PKG}/generate-groups.sh "deepcopy" \ "duck:v1alpha1" \ --go-header-file ${REPO_ROOT_DIR}/hack/boilerplate/boilerplate.go.txt +CONTRIB_DIRS=(contrib/kafka/pkg) + +for DIR in "${CONTRIB_DIRS[@]}"; do + # generate the code with: + # --output-base because this script should also be able to run inside the vendor dir of + # k8s.io/kubernetes. The output-base is needed for the generators to output into the vendor dir + # instead of the $GOPATH directly. For normal projects this can be dropped. 
+ ${CODEGEN_PKG}/generate-groups.sh "deepcopy,client,informer,lister" \ + github.com/knative/eventing/${DIR}/client github.com/knative/eventing/${DIR}/apis \ + "messaging:v1alpha1" \ + --go-header-file ${REPO_ROOT_DIR}/hack/boilerplate/boilerplate.go.txt +done + # Make sure our dependencies are up-to-date ${REPO_ROOT_DIR}/hack/update-deps.sh diff --git a/test/e2e-tests.sh b/test/e2e-tests.sh index 85dc69d79f0..c96b695d477 100755 --- a/test/e2e-tests.sh +++ b/test/e2e-tests.sh @@ -55,7 +55,7 @@ readonly STRIMZI_INSTALLATION_CONFIG="$(mktemp)" # Kafka cluster CR config file. readonly KAFKA_INSTALLATION_CONFIG="test/config/100-kafka-persistent-single-2.1.0.yaml" # Kafka provisioner config template. -readonly KAFKA_CONFIG_TEMPLATE="contrib/kafka/config/kafka.yaml" +readonly KAFKA_CONFIG_TEMPLATE="contrib/kafka/config/provisioner/kafka.yaml" # Real Kafka provisioner config, generated from the template. readonly KAFKA_CONFIG="$(mktemp)" # Kafka cluster URL for our installation