From 259f72736a055fed0a77d0429b476d9d3c93464e Mon Sep 17 00:00:00 2001 From: nachocano Date: Mon, 20 May 2019 10:06:25 -0700 Subject: [PATCH 01/64] messaging --- contrib/kafka/pkg/apis/messaging/register.go | 21 ++++++++++++++++++++ 1 file changed, 21 insertions(+) create mode 100644 contrib/kafka/pkg/apis/messaging/register.go diff --git a/contrib/kafka/pkg/apis/messaging/register.go b/contrib/kafka/pkg/apis/messaging/register.go new file mode 100644 index 00000000000..8f678adcd23 --- /dev/null +++ b/contrib/kafka/pkg/apis/messaging/register.go @@ -0,0 +1,21 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
const (
	// GroupName is the Kubernetes API group served by the Kafka messaging
	// types (e.g. messaging.knative.dev/v1alpha1 KafkaChannel).
	GroupName = "messaging.knative.dev"
)
create mode 100644 contrib/kafka/pkg/apis/messaging/v1alpha1/crd_validation_test.go create mode 100644 contrib/kafka/pkg/apis/messaging/v1alpha1/doc.go create mode 100644 contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_defaults.go create mode 100644 contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_lifecycle.go create mode 100644 contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_lifecycle_test.go create mode 100644 contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_types.go create mode 100644 contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation.go create mode 100644 contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation_test.go create mode 100644 contrib/kafka/pkg/apis/messaging/v1alpha1/register.go create mode 100644 contrib/kafka/pkg/apis/messaging/v1alpha1/zz_generated.deepcopy.go create mode 100644 contrib/kafka/pkg/client/clientset/versioned/clientset.go create mode 100644 contrib/kafka/pkg/client/clientset/versioned/doc.go create mode 100644 contrib/kafka/pkg/client/clientset/versioned/fake/clientset_generated.go create mode 100644 contrib/kafka/pkg/client/clientset/versioned/fake/doc.go create mode 100644 contrib/kafka/pkg/client/clientset/versioned/fake/register.go create mode 100644 contrib/kafka/pkg/client/clientset/versioned/scheme/doc.go create mode 100644 contrib/kafka/pkg/client/clientset/versioned/scheme/register.go create mode 100644 contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1/doc.go create mode 100644 contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1/fake/doc.go create mode 100644 contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1/fake/fake_kafkachannel.go create mode 100644 contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1/fake/fake_messaging_client.go create mode 100644 contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1/generated_expansion.go create mode 100644 
contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1/kafkachannel.go create mode 100644 contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1/messaging_client.go create mode 100644 contrib/kafka/pkg/client/informers/externalversions/factory.go create mode 100644 contrib/kafka/pkg/client/informers/externalversions/generic.go create mode 100644 contrib/kafka/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go create mode 100644 contrib/kafka/pkg/client/informers/externalversions/messaging/interface.go create mode 100644 contrib/kafka/pkg/client/informers/externalversions/messaging/v1alpha1/interface.go create mode 100644 contrib/kafka/pkg/client/informers/externalversions/messaging/v1alpha1/kafkachannel.go create mode 100644 contrib/kafka/pkg/client/listers/messaging/v1alpha1/expansion_generated.go create mode 100644 contrib/kafka/pkg/client/listers/messaging/v1alpha1/kafkachannel.go diff --git a/contrib/kafka/pkg/apis/messaging/v1alpha1/crd_validation_test.go b/contrib/kafka/pkg/apis/messaging/v1alpha1/crd_validation_test.go new file mode 100644 index 00000000000..557b0f87987 --- /dev/null +++ b/contrib/kafka/pkg/apis/messaging/v1alpha1/crd_validation_test.go @@ -0,0 +1,43 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + "context" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/knative/pkg/apis" + "github.com/knative/pkg/webhook" +) + +type CRDTest struct { + name string + cr webhook.GenericCRD + want *apis.FieldError +} + +func doValidateTest(t *testing.T, tests []CRDTest) { + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := test.cr.Validate(context.TODO()) + if diff := cmp.Diff(test.want.Error(), got.Error()); diff != "" { + t.Errorf("%s: validate (-want, +got) = %v", test.name, diff) + } + }) + } +} diff --git a/contrib/kafka/pkg/apis/messaging/v1alpha1/doc.go b/contrib/kafka/pkg/apis/messaging/v1alpha1/doc.go new file mode 100644 index 00000000000..64e1d2ec055 --- /dev/null +++ b/contrib/kafka/pkg/apis/messaging/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1alpha1 is the v1alpha1 version of the API. 
+// +k8s:deepcopy-gen=package +// +groupName=messaging.knative.dev +package v1alpha1 diff --git a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_defaults.go b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_defaults.go new file mode 100644 index 00000000000..54851449fa9 --- /dev/null +++ b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_defaults.go @@ -0,0 +1,27 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import "context" + +func (c *KafkaChannel) SetDefaults(ctx context.Context) { + c.Spec.SetDefaults(ctx) +} + +func (cs *KafkaChannelSpec) SetDefaults(ctx context.Context) { + // TODO: Nothing to default here... +} diff --git a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_lifecycle.go b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_lifecycle.go new file mode 100644 index 00000000000..41be5d5d268 --- /dev/null +++ b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_lifecycle.go @@ -0,0 +1,123 @@ +/* + * Copyright 2019 The Knative Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1alpha1 + +import ( + duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" +) + +var kc = duckv1alpha1.NewLivingConditionSet( + KafkaChannelConditionDispatcherReady, + KafkaChannelConditionServiceReady, + KafkaChannelConditionEndpointsReady, + KafkaChannelConditionAddressable, + KafkaChannelConditionChannelServiceReady) + +const ( + // KafkaChannelConditionReady has status True when all subconditions below have been set to True. + KafkaChannelConditionReady = duckv1alpha1.ConditionReady + + // KafkaChannelConditionDispatcherReady has status True when a Dispatcher deployment is ready + // Keyed off appsv1.DeploymentAvailable, which means minimum available replicas required are up + // and running for at least minReadySeconds. + KafkaChannelConditionDispatcherReady duckv1alpha1.ConditionType = "DispatcherReady" + + // KafkaChannelConditionServiceReady has status True when a k8s Service is ready. This + // basically just means it exists because there's no meaningful status in Service. See Endpoints + // below. + KafkaChannelConditionServiceReady duckv1alpha1.ConditionType = "ServiceReady" + + // KafkaChannelConditionEndpointsReady has status True when a k8s Service Endpoints are backed + // by at least one endpoint. + KafkaChannelConditionEndpointsReady duckv1alpha1.ConditionType = "EndpointsReady" + + // KafkaChannelConditionAddressable has status true when this KafkaChannel meets + // the Addressable contract and has a non-empty hostname. 
+ KafkaChannelConditionAddressable duckv1alpha1.ConditionType = "Addressable" + + // KafkaChannelConditionServiceReady has status True when a k8s Service representing the channel is ready. + // Because this uses ExternalName, there are no endpoints to check. + KafkaChannelConditionChannelServiceReady duckv1alpha1.ConditionType = "ChannelServiceReady" +) + +// GetCondition returns the condition currently associated with the given type, or nil. +func (cs *KafkaChannelStatus) GetCondition(t duckv1alpha1.ConditionType) *duckv1alpha1.Condition { + return kc.Manage(cs).GetCondition(t) +} + +// IsReady returns true if the resource is ready overall. +func (cs *KafkaChannelStatus) IsReady() bool { + return kc.Manage(cs).IsHappy() +} + +// InitializeConditions sets relevant unset conditions to Unknown state. +func (cs *KafkaChannelStatus) InitializeConditions() { + kc.Manage(cs).InitializeConditions() +} + +// TODO: Use the new beta duck types. +func (cs *KafkaChannelStatus) SetAddress(hostname string) { + cs.Address.Hostname = hostname + if hostname != "" { + kc.Manage(cs).MarkTrue(KafkaChannelConditionAddressable) + } else { + kc.Manage(cs).MarkFalse(KafkaChannelConditionAddressable, "EmptyHostname", "hostname is the empty string") + } +} + +func (cs *KafkaChannelStatus) MarkDispatcherFailed(reason, messageFormat string, messageA ...interface{}) { + kc.Manage(cs).MarkFalse(KafkaChannelConditionDispatcherReady, reason, messageFormat, messageA...) +} + +// TODO: Unify this with the ones from Eventing. Say: Broker, Trigger. 
+func (cs *KafkaChannelStatus) PropagateDispatcherStatus(ds *appsv1.DeploymentStatus) { + for _, cond := range ds.Conditions { + if cond.Type == appsv1.DeploymentAvailable { + if cond.Status != corev1.ConditionTrue { + cs.MarkDispatcherFailed("DispatcherNotReady", "Dispatcher Deployment is not ready: %s : %s", cond.Reason, cond.Message) + } else { + kc.Manage(cs).MarkTrue(KafkaChannelConditionDispatcherReady) + } + } + } +} + +func (cs *KafkaChannelStatus) MarkServiceFailed(reason, messageFormat string, messageA ...interface{}) { + kc.Manage(cs).MarkFalse(KafkaChannelConditionServiceReady, reason, messageFormat, messageA...) +} + +func (cs *KafkaChannelStatus) MarkServiceTrue() { + kc.Manage(cs).MarkTrue(KafkaChannelConditionServiceReady) +} + +func (cs *KafkaChannelStatus) MarkChannelServiceFailed(reason, messageFormat string, messageA ...interface{}) { + kc.Manage(cs).MarkFalse(KafkaChannelConditionChannelServiceReady, reason, messageFormat, messageA...) +} + +func (cs *KafkaChannelStatus) MarkChannelServiceTrue() { + kc.Manage(cs).MarkTrue(KafkaChannelConditionChannelServiceReady) +} + +func (cs *KafkaChannelStatus) MarkEndpointsFailed(reason, messageFormat string, messageA ...interface{}) { + kc.Manage(cs).MarkFalse(KafkaChannelConditionEndpointsReady, reason, messageFormat, messageA...) +} + +func (cs *KafkaChannelStatus) MarkEndpointsTrue() { + kc.Manage(cs).MarkTrue(KafkaChannelConditionEndpointsReady) +} diff --git a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_lifecycle_test.go b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_lifecycle_test.go new file mode 100644 index 00000000000..1b228561ad3 --- /dev/null +++ b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_lifecycle_test.go @@ -0,0 +1,369 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" +) + +var condReady = duckv1alpha1.Condition{ + Type: KafkaChannelConditionReady, + Status: corev1.ConditionTrue, +} + +var condDispatcherReady = duckv1alpha1.Condition{ + Type: KafkaChannelConditionDispatcherReady, + Status: corev1.ConditionTrue, +} + +var condDispatcherNotReady = duckv1alpha1.Condition{ + Type: KafkaChannelConditionDispatcherReady, + Status: corev1.ConditionFalse, +} + +var condDispatcherServiceReady = duckv1alpha1.Condition{ + Type: KafkaChannelConditionServiceReady, + Status: corev1.ConditionTrue, +} + +var condDispatcherEndpointsReady = duckv1alpha1.Condition{ + Type: KafkaChannelConditionEndpointsReady, + Status: corev1.ConditionTrue, +} + +var condDispatcherAddressable = duckv1alpha1.Condition{ + Type: KafkaChannelConditionAddressable, + Status: corev1.ConditionTrue, +} + +var deploymentConditionReady = appsv1.DeploymentCondition{ + Type: appsv1.DeploymentAvailable, + Status: corev1.ConditionTrue, +} + +var deploymentConditionNotReady = appsv1.DeploymentCondition{ + Type: appsv1.DeploymentAvailable, + Status: corev1.ConditionFalse, +} + +var deploymentStatusReady = &appsv1.DeploymentStatus{Conditions: []appsv1.DeploymentCondition{deploymentConditionReady}} +var deploymentStatusNotReady = &appsv1.DeploymentStatus{Conditions: []appsv1.DeploymentCondition{deploymentConditionNotReady}} + +var 
ignoreAllButTypeAndStatus = cmpopts.IgnoreFields( + duckv1alpha1.Condition{}, + "LastTransitionTime", "Message", "Reason", "Severity") + +var ignoreLastTransitionTime = cmpopts.IgnoreFields(duckv1alpha1.Condition{}, "LastTransitionTime") + +func TestChannelGetCondition(t *testing.T) { + tests := []struct { + name string + cs *KafkaChannelStatus + condQuery duckv1alpha1.ConditionType + want *duckv1alpha1.Condition + }{{ + name: "single condition", + cs: &KafkaChannelStatus{ + Status: duckv1alpha1.Status{ + Conditions: []duckv1alpha1.Condition{ + condReady, + }, + }, + }, + condQuery: duckv1alpha1.ConditionReady, + want: &condReady, + }, { + name: "unknown condition", + cs: &KafkaChannelStatus{ + Status: duckv1alpha1.Status{ + Conditions: []duckv1alpha1.Condition{ + condReady, + condDispatcherNotReady, + }, + }, + }, + condQuery: duckv1alpha1.ConditionType("foo"), + want: nil, + }} + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := test.cs.GetCondition(test.condQuery) + if diff := cmp.Diff(test.want, got); diff != "" { + t.Errorf("unexpected condition (-want, +got) = %v", diff) + } + }) + } +} + +func TestChannelInitializeConditions(t *testing.T) { + tests := []struct { + name string + cs *KafkaChannelStatus + want *KafkaChannelStatus + }{{ + name: "empty", + cs: &KafkaChannelStatus{}, + want: &KafkaChannelStatus{ + Status: duckv1alpha1.Status{ + Conditions: []duckv1alpha1.Condition{{ + Type: KafkaChannelConditionAddressable, + Status: corev1.ConditionUnknown, + }, { + Type: KafkaChannelConditionChannelServiceReady, + Status: corev1.ConditionUnknown, + }, { + Type: KafkaChannelConditionDispatcherReady, + Status: corev1.ConditionUnknown, + }, { + Type: KafkaChannelConditionEndpointsReady, + Status: corev1.ConditionUnknown, + }, { + Type: KafkaChannelConditionReady, + Status: corev1.ConditionUnknown, + }, { + Type: KafkaChannelConditionServiceReady, + Status: corev1.ConditionUnknown, + }}, + }, + }, + }, { + name: "one false", + cs: 
&KafkaChannelStatus{ + Status: duckv1alpha1.Status{ + Conditions: []duckv1alpha1.Condition{{ + Type: KafkaChannelConditionDispatcherReady, + Status: corev1.ConditionFalse, + }}, + }, + }, + want: &KafkaChannelStatus{ + Status: duckv1alpha1.Status{ + Conditions: []duckv1alpha1.Condition{{ + Type: KafkaChannelConditionAddressable, + Status: corev1.ConditionUnknown, + }, { + Type: KafkaChannelConditionChannelServiceReady, + Status: corev1.ConditionUnknown, + }, { + Type: KafkaChannelConditionDispatcherReady, + Status: corev1.ConditionFalse, + }, { + Type: KafkaChannelConditionEndpointsReady, + Status: corev1.ConditionUnknown, + }, { + Type: KafkaChannelConditionReady, + Status: corev1.ConditionUnknown, + }, { + Type: KafkaChannelConditionServiceReady, + Status: corev1.ConditionUnknown, + }}, + }, + }, + }, { + name: "one true", + cs: &KafkaChannelStatus{ + Status: duckv1alpha1.Status{ + Conditions: []duckv1alpha1.Condition{{ + Type: KafkaChannelConditionDispatcherReady, + Status: corev1.ConditionTrue, + }}, + }, + }, + want: &KafkaChannelStatus{ + Status: duckv1alpha1.Status{ + Conditions: []duckv1alpha1.Condition{{ + Type: KafkaChannelConditionAddressable, + Status: corev1.ConditionUnknown, + }, { + Type: KafkaChannelConditionChannelServiceReady, + Status: corev1.ConditionUnknown, + }, { + Type: KafkaChannelConditionDispatcherReady, + Status: corev1.ConditionTrue, + }, { + Type: KafkaChannelConditionEndpointsReady, + Status: corev1.ConditionUnknown, + }, { + Type: KafkaChannelConditionReady, + Status: corev1.ConditionUnknown, + }, { + Type: KafkaChannelConditionServiceReady, + Status: corev1.ConditionUnknown, + }}, + }, + }, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + test.cs.InitializeConditions() + if diff := cmp.Diff(test.want, test.cs, ignoreAllButTypeAndStatus); diff != "" { + t.Errorf("unexpected conditions (-want, +got) = %v", diff) + } + }) + } +} + +func TestChannelIsReady(t *testing.T) { + tests := []struct { + name 
string + markServiceReady bool + markChannelServiceReady bool + setAddress bool + markEndpointsReady bool + wantReady bool + dispatcherStatus *appsv1.DeploymentStatus + }{{ + name: "all happy", + markServiceReady: true, + markChannelServiceReady: true, + markEndpointsReady: true, + dispatcherStatus: deploymentStatusReady, + setAddress: true, + wantReady: true, + }, { + name: "service not ready", + markServiceReady: false, + markChannelServiceReady: false, + markEndpointsReady: true, + dispatcherStatus: deploymentStatusReady, + setAddress: true, + wantReady: false, + }, { + name: "endpoints not ready", + markServiceReady: true, + markChannelServiceReady: false, + markEndpointsReady: false, + dispatcherStatus: deploymentStatusReady, + setAddress: true, + wantReady: false, + }, { + name: "deployment not ready", + markServiceReady: true, + markEndpointsReady: true, + markChannelServiceReady: false, + dispatcherStatus: deploymentStatusNotReady, + setAddress: true, + wantReady: false, + }, { + name: "address not set", + markServiceReady: true, + markChannelServiceReady: false, + markEndpointsReady: true, + dispatcherStatus: deploymentStatusReady, + setAddress: false, + wantReady: false, + }, { + name: "channel service not ready", + markServiceReady: true, + markChannelServiceReady: false, + markEndpointsReady: true, + dispatcherStatus: deploymentStatusReady, + setAddress: true, + wantReady: false, + }} + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + cs := &KafkaChannelStatus{} + cs.InitializeConditions() + if test.markServiceReady { + cs.MarkServiceTrue() + } else { + cs.MarkServiceFailed("NotReadyService", "testing") + } + if test.markChannelServiceReady { + cs.MarkChannelServiceTrue() + } else { + cs.MarkChannelServiceFailed("NotReadyChannelService", "testing") + } + if test.setAddress { + cs.SetAddress("foo.bar") + } + if test.markEndpointsReady { + cs.MarkEndpointsTrue() + } else { + cs.MarkEndpointsFailed("NotReadyEndpoints", "testing") + } 
+ if test.dispatcherStatus != nil { + cs.PropagateDispatcherStatus(test.dispatcherStatus) + } else { + cs.MarkDispatcherFailed("NotReadyDispatcher", "testing") + } + got := cs.IsReady() + if test.wantReady != got { + t.Errorf("unexpected readiness: want %v, got %v", test.wantReady, got) + } + }) + } +} + +func TestKafkaChannelStatus_SetAddressable(t *testing.T) { + testCases := map[string]struct { + domainInternal string + want *KafkaChannelStatus + }{ + "empty string": { + want: &KafkaChannelStatus{ + Status: duckv1alpha1.Status{ + Conditions: []duckv1alpha1.Condition{ + { + Type: KafkaChannelConditionAddressable, + Status: corev1.ConditionFalse, + }, + // Note that Ready is here because when the condition is marked False, duck + // automatically sets Ready to false. + { + Type: KafkaChannelConditionReady, + Status: corev1.ConditionFalse, + }, + }, + }, + }, + }, + "has domain": { + domainInternal: "test-domain", + want: &KafkaChannelStatus{ + Address: duckv1alpha1.Addressable{ + Hostname: "test-domain", + }, + Status: duckv1alpha1.Status{ + Conditions: []duckv1alpha1.Condition{ + { + Type: KafkaChannelConditionAddressable, + Status: corev1.ConditionTrue, + }, + }, + }, + }, + }, + } + for n, tc := range testCases { + t.Run(n, func(t *testing.T) { + cs := &KafkaChannelStatus{} + cs.SetAddress(tc.domainInternal) + if diff := cmp.Diff(tc.want, cs, ignoreAllButTypeAndStatus); diff != "" { + t.Errorf("unexpected conditions (-want, +got) = %v", diff) + } + }) + } +} diff --git a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_types.go b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_types.go new file mode 100644 index 00000000000..402071d19bd --- /dev/null +++ b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_types.go @@ -0,0 +1,88 @@ +/* + * Copyright 2019 The Knative Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1alpha1 + +import ( + eventingduck "github.com/knative/eventing/pkg/apis/duck/v1alpha1" + "github.com/knative/pkg/apis" + duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1" + "github.com/knative/pkg/webhook" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// KafkaChannel is a resource representing a Kafka Channel. +type KafkaChannel struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Spec defines the desired state of the Channel. + Spec KafkaChannelSpec `json:"spec,omitempty"` + + // Status represents the current state of the Channel. This data may be out of + // date. + // +optional + Status KafkaChannelStatus `json:"status,omitempty"` +} + +// Check that Channel can be validated, can be defaulted, and has immutable fields. +var _ apis.Validatable = (*KafkaChannel)(nil) +var _ apis.Defaultable = (*KafkaChannel)(nil) +var _ runtime.Object = (*KafkaChannel)(nil) +var _ webhook.GenericCRD = (*KafkaChannel)(nil) + +// KafkaChannelSpec defines which subscribers have expressed interest in +// receiving events from this KafkaChannel. +type KafkaChannelSpec struct { + // Channel conforms to Duck type Subscribable. + Subscribable *eventingduck.Subscribable `json:"subscribable,omitempty"` +} + +// ChannelStatus represents the current state of a Channel. 
+type KafkaChannelStatus struct { + // inherits duck/v1alpha1 Status, which currently provides: + // * ObservedGeneration - the 'Generation' of the Service that was last processed by the controller. + // * Conditions - the latest available observations of a resource's current state. + duckv1alpha1.Status `json:",inline"` + + // KafkaChannel is Addressable. It currently exposes the endpoint as a + // fully-qualified DNS name which will distribute traffic over the + // provided targets from inside the cluster. + // + // It generally has the form {channel}.{namespace}.svc.{cluster domain name} + Address duckv1alpha1.Addressable `json:"address,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// KafkaChannelList is a collection of KafkaChannels. +type KafkaChannelList struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + Items []KafkaChannelSpec `json:"items"` +} + +// GetGroupVersionKind returns GroupVersionKind for KafkaChannels +func (c *KafkaChannel) GetGroupVersionKind() schema.GroupVersionKind { + return SchemeGroupVersion.WithKind("KafkaChannel") +} diff --git a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation.go b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation.go new file mode 100644 index 00000000000..fa5c4e03fba --- /dev/null +++ b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation.go @@ -0,0 +1,43 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "context" + "fmt" + + "github.com/knative/pkg/apis" +) + +func (c *KafkaChannel) Validate(ctx context.Context) *apis.FieldError { + return c.Spec.Validate(ctx).ViaField("spec") +} + +func (cs *KafkaChannelSpec) Validate(ctx context.Context) *apis.FieldError { + var errs *apis.FieldError + + if cs.Subscribable != nil { + for i, subscriber := range cs.Subscribable.Subscribers { + if subscriber.ReplyURI == "" && subscriber.SubscriberURI == "" { + fe := apis.ErrMissingField("replyURI", "subscriberURI") + fe.Details = "expected at least one of, got none" + errs = errs.Also(fe.ViaField(fmt.Sprintf("subscriber[%d]", i)).ViaField("subscribable")) + } + } + } + return errs +} diff --git a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation_test.go b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation_test.go new file mode 100644 index 00000000000..b49a7d7847a --- /dev/null +++ b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation_test.go @@ -0,0 +1,83 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + "testing" + + eventingduck "github.com/knative/eventing/pkg/apis/duck/v1alpha1" + "github.com/knative/pkg/apis" +) + +func TestGooglePubSubChannelValidation(t *testing.T) { + tests := []CRDTest{{ + name: "empty", + cr: &KafkaChannel{ + Spec: KafkaChannelSpec{}, + }, + want: nil, + }, { + name: "valid subscribers array", + cr: &KafkaChannel{ + Spec: KafkaChannelSpec{ + Subscribable: &eventingduck.Subscribable{ + Subscribers: []eventingduck.ChannelSubscriberSpec{{ + SubscriberURI: "subscriberendpoint", + ReplyURI: "resultendpoint", + }}, + }}, + }, + want: nil, + }, { + name: "empty subscriber at index 1", + cr: &KafkaChannel{ + Spec: KafkaChannelSpec{ + Subscribable: &eventingduck.Subscribable{ + Subscribers: []eventingduck.ChannelSubscriberSpec{{ + SubscriberURI: "subscriberendpoint", + ReplyURI: "replyendpoint", + }, {}}, + }}, + }, + want: func() *apis.FieldError { + fe := apis.ErrMissingField("spec.subscribable.subscriber[1].replyURI", "spec.subscribable.subscriber[1].subscriberURI") + fe.Details = "expected at least one of, got none" + return fe + }(), + }, { + name: "2 empty subscribers", + cr: &KafkaChannel{ + Spec: KafkaChannelSpec{ + Subscribable: &eventingduck.Subscribable{ + Subscribers: []eventingduck.ChannelSubscriberSpec{{}, {}}, + }, + }, + }, + want: func() *apis.FieldError { + var errs *apis.FieldError + fe := apis.ErrMissingField("spec.subscribable.subscriber[0].replyURI", "spec.subscribable.subscriber[0].subscriberURI") + fe.Details = "expected at least one of, got none" + errs = errs.Also(fe) + fe = apis.ErrMissingField("spec.subscribable.subscriber[1].replyURI", "spec.subscribable.subscriber[1].subscriberURI") + fe.Details = "expected at least one of, got none" + errs = errs.Also(fe) + return errs + }(), + }} + + doValidateTest(t, tests) +} diff --git a/contrib/kafka/pkg/apis/messaging/v1alpha1/register.go b/contrib/kafka/pkg/apis/messaging/v1alpha1/register.go new file mode 100644 index 
00000000000..de1ed247c6f --- /dev/null +++ b/contrib/kafka/pkg/apis/messaging/v1alpha1/register.go @@ -0,0 +1,53 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + // Use the kafka contrib messaging package (added in this series) for + // GroupName, rather than the core eventing messaging package. + "github.com/knative/eventing/contrib/kafka/pkg/apis/messaging" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: messaging.GroupName, Version: "v1alpha1"} + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to Scheme. 
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &KafkaChannel{}, + &KafkaChannelList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/contrib/kafka/pkg/apis/messaging/v1alpha1/zz_generated.deepcopy.go b/contrib/kafka/pkg/apis/messaging/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 00000000000..06416f290de --- /dev/null +++ b/contrib/kafka/pkg/apis/messaging/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,126 @@ +// +build !ignore_autogenerated + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + duckv1alpha1 "github.com/knative/eventing/pkg/apis/duck/v1alpha1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaChannel) DeepCopyInto(out *KafkaChannel) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaChannel. 
+func (in *KafkaChannel) DeepCopy() *KafkaChannel { + if in == nil { + return nil + } + out := new(KafkaChannel) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KafkaChannel) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaChannelList) DeepCopyInto(out *KafkaChannelList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KafkaChannel, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaChannelList. +func (in *KafkaChannelList) DeepCopy() *KafkaChannelList { + if in == nil { + return nil + } + out := new(KafkaChannelList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KafkaChannelList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaChannelSpec) DeepCopyInto(out *KafkaChannelSpec) { + *out = *in + if in.Subscribable != nil { + in, out := &in.Subscribable, &out.Subscribable + *out = new(duckv1alpha1.Subscribable) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaChannelSpec. 
+func (in *KafkaChannelSpec) DeepCopy() *KafkaChannelSpec { + if in == nil { + return nil + } + out := new(KafkaChannelSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaChannelStatus) DeepCopyInto(out *KafkaChannelStatus) { + *out = *in + in.Status.DeepCopyInto(&out.Status) + in.Address.DeepCopyInto(&out.Address) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaChannelStatus. +func (in *KafkaChannelStatus) DeepCopy() *KafkaChannelStatus { + if in == nil { + return nil + } + out := new(KafkaChannelStatus) + in.DeepCopyInto(out) + return out +} diff --git a/contrib/kafka/pkg/client/clientset/versioned/clientset.go b/contrib/kafka/pkg/client/clientset/versioned/clientset.go new file mode 100644 index 00000000000..bf01dfec9fa --- /dev/null +++ b/contrib/kafka/pkg/client/clientset/versioned/clientset.go @@ -0,0 +1,98 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package versioned + +import ( + messagingv1alpha1 "github.com/knative/eventing/contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1" + discovery "k8s.io/client-go/discovery" + rest "k8s.io/client-go/rest" + flowcontrol "k8s.io/client-go/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + MessagingV1alpha1() messagingv1alpha1.MessagingV1alpha1Interface + // Deprecated: please explicitly pick a version if possible. + Messaging() messagingv1alpha1.MessagingV1alpha1Interface +} + +// Clientset contains the clients for groups. Each group has exactly one +// version included in a Clientset. +type Clientset struct { + *discovery.DiscoveryClient + messagingV1alpha1 *messagingv1alpha1.MessagingV1alpha1Client +} + +// MessagingV1alpha1 retrieves the MessagingV1alpha1Client +func (c *Clientset) MessagingV1alpha1() messagingv1alpha1.MessagingV1alpha1Interface { + return c.messagingV1alpha1 +} + +// Deprecated: Messaging retrieves the default version of MessagingClient. +// Please explicitly pick a version. +func (c *Clientset) Messaging() messagingv1alpha1.MessagingV1alpha1Interface { + return c.messagingV1alpha1 +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + if c == nil { + return nil + } + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. 
+func NewForConfig(c *rest.Config) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + var cs Clientset + var err error + cs.messagingV1alpha1, err = messagingv1alpha1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + return &cs, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *Clientset { + var cs Clientset + cs.messagingV1alpha1 = messagingv1alpha1.NewForConfigOrDie(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) + return &cs +} + +// New creates a new Clientset for the given RESTClient. +func New(c rest.Interface) *Clientset { + var cs Clientset + cs.messagingV1alpha1 = messagingv1alpha1.New(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &cs +} diff --git a/contrib/kafka/pkg/client/clientset/versioned/doc.go b/contrib/kafka/pkg/client/clientset/versioned/doc.go new file mode 100644 index 00000000000..1122e50bfc3 --- /dev/null +++ b/contrib/kafka/pkg/client/clientset/versioned/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated clientset. +package versioned diff --git a/contrib/kafka/pkg/client/clientset/versioned/fake/clientset_generated.go b/contrib/kafka/pkg/client/clientset/versioned/fake/clientset_generated.go new file mode 100644 index 00000000000..2cd9095901a --- /dev/null +++ b/contrib/kafka/pkg/client/clientset/versioned/fake/clientset_generated.go @@ -0,0 +1,82 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + clientset "github.com/knative/eventing/contrib/kafka/pkg/client/clientset/versioned" + messagingv1alpha1 "github.com/knative/eventing/contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1" + fakemessagingv1alpha1 "github.com/knative/eventing/contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1/fake" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/discovery" + fakediscovery "k8s.io/client-go/discovery/fake" + "k8s.io/client-go/testing" +) + +// NewSimpleClientset returns a clientset that will respond with the provided objects. +// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any validations and/or defaults. 
It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. +func NewSimpleClientset(objects ...runtime.Object) *Clientset { + o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + cs := &Clientset{} + cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} + cs.AddReactor("*", "*", testing.ObjectReaction(o)) + cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) + + return cs +} + +// Clientset implements clientset.Interface. Meant to be embedded into a +// struct to get a default implementation. This makes faking out just the method +// you want to test easier. +type Clientset struct { + testing.Fake + discovery *fakediscovery.FakeDiscovery +} + +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return c.discovery +} + +var _ clientset.Interface = &Clientset{} + +// MessagingV1alpha1 retrieves the MessagingV1alpha1Client +func (c *Clientset) MessagingV1alpha1() messagingv1alpha1.MessagingV1alpha1Interface { + return &fakemessagingv1alpha1.FakeMessagingV1alpha1{Fake: &c.Fake} +} + +// Messaging retrieves the MessagingV1alpha1Client +func (c *Clientset) Messaging() messagingv1alpha1.MessagingV1alpha1Interface { + return &fakemessagingv1alpha1.FakeMessagingV1alpha1{Fake: &c.Fake} +} diff --git a/contrib/kafka/pkg/client/clientset/versioned/fake/doc.go b/contrib/kafka/pkg/client/clientset/versioned/fake/doc.go new file mode 100644 index 00000000000..87f3c3e0b01 --- /dev/null +++ b/contrib/kafka/pkg/client/clientset/versioned/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this 
file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated fake clientset. +package fake diff --git a/contrib/kafka/pkg/client/clientset/versioned/fake/register.go b/contrib/kafka/pkg/client/clientset/versioned/fake/register.go new file mode 100644 index 00000000000..d8716c25725 --- /dev/null +++ b/contrib/kafka/pkg/client/clientset/versioned/fake/register.go @@ -0,0 +1,56 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + messagingv1alpha1 "github.com/knative/eventing/contrib/kafka/pkg/apis/messaging/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var scheme = runtime.NewScheme() +var codecs = serializer.NewCodecFactory(scheme) +var parameterCodec = runtime.NewParameterCodec(scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + messagingv1alpha1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(scheme)) +} diff --git a/contrib/kafka/pkg/client/clientset/versioned/scheme/doc.go b/contrib/kafka/pkg/client/clientset/versioned/scheme/doc.go new file mode 100644 index 00000000000..7d76538485b --- /dev/null +++ b/contrib/kafka/pkg/client/clientset/versioned/scheme/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package contains the scheme of the automatically generated clientset. +package scheme diff --git a/contrib/kafka/pkg/client/clientset/versioned/scheme/register.go b/contrib/kafka/pkg/client/clientset/versioned/scheme/register.go new file mode 100644 index 00000000000..655d74d7620 --- /dev/null +++ b/contrib/kafka/pkg/client/clientset/versioned/scheme/register.go @@ -0,0 +1,56 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package scheme + +import ( + messagingv1alpha1 "github.com/knative/eventing/contrib/kafka/pkg/apis/messaging/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var Scheme = runtime.NewScheme() +var Codecs = serializer.NewCodecFactory(Scheme) +var ParameterCodec = runtime.NewParameterCodec(Scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + messagingv1alpha1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(Scheme)) +} diff --git a/contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1/doc.go b/contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1/doc.go new file mode 100644 index 00000000000..a1c6bb9fe8f --- /dev/null +++ b/contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1alpha1 diff --git a/contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1/fake/doc.go b/contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1/fake/doc.go new file mode 100644 index 00000000000..a00e5d7b21a --- /dev/null +++ b/contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. 
+package fake diff --git a/contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1/fake/fake_kafkachannel.go b/contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1/fake/fake_kafkachannel.go new file mode 100644 index 00000000000..4a7fad7f9ae --- /dev/null +++ b/contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1/fake/fake_kafkachannel.go @@ -0,0 +1,140 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1alpha1 "github.com/knative/eventing/contrib/kafka/pkg/apis/messaging/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeKafkaChannels implements KafkaChannelInterface +type FakeKafkaChannels struct { + Fake *FakeMessagingV1alpha1 + ns string +} + +var kafkachannelsResource = schema.GroupVersionResource{Group: "messaging.knative.dev", Version: "v1alpha1", Resource: "kafkachannels"} + +var kafkachannelsKind = schema.GroupVersionKind{Group: "messaging.knative.dev", Version: "v1alpha1", Kind: "KafkaChannel"} + +// Get takes name of the kafkaChannel, and returns the corresponding kafkaChannel object, and an error if there is any. 
+func (c *FakeKafkaChannels) Get(name string, options v1.GetOptions) (result *v1alpha1.KafkaChannel, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(kafkachannelsResource, c.ns, name), &v1alpha1.KafkaChannel{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.KafkaChannel), err +} + +// List takes label and field selectors, and returns the list of KafkaChannels that match those selectors. +func (c *FakeKafkaChannels) List(opts v1.ListOptions) (result *v1alpha1.KafkaChannelList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(kafkachannelsResource, kafkachannelsKind, c.ns, opts), &v1alpha1.KafkaChannelList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.KafkaChannelList{ListMeta: obj.(*v1alpha1.KafkaChannelList).ListMeta} + for _, item := range obj.(*v1alpha1.KafkaChannelList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested kafkaChannels. +func (c *FakeKafkaChannels) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(kafkachannelsResource, c.ns, opts)) + +} + +// Create takes the representation of a kafkaChannel and creates it. Returns the server's representation of the kafkaChannel, and an error, if there is any. +func (c *FakeKafkaChannels) Create(kafkaChannel *v1alpha1.KafkaChannel) (result *v1alpha1.KafkaChannel, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(kafkachannelsResource, c.ns, kafkaChannel), &v1alpha1.KafkaChannel{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.KafkaChannel), err +} + +// Update takes the representation of a kafkaChannel and updates it. 
Returns the server's representation of the kafkaChannel, and an error, if there is any. +func (c *FakeKafkaChannels) Update(kafkaChannel *v1alpha1.KafkaChannel) (result *v1alpha1.KafkaChannel, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(kafkachannelsResource, c.ns, kafkaChannel), &v1alpha1.KafkaChannel{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.KafkaChannel), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeKafkaChannels) UpdateStatus(kafkaChannel *v1alpha1.KafkaChannel) (*v1alpha1.KafkaChannel, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(kafkachannelsResource, "status", c.ns, kafkaChannel), &v1alpha1.KafkaChannel{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.KafkaChannel), err +} + +// Delete takes name of the kafkaChannel and deletes it. Returns an error if one occurs. +func (c *FakeKafkaChannels) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(kafkachannelsResource, c.ns, name), &v1alpha1.KafkaChannel{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeKafkaChannels) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(kafkachannelsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha1.KafkaChannelList{}) + return err +} + +// Patch applies the patch and returns the patched kafkaChannel. +func (c *FakeKafkaChannels) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.KafkaChannel, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(kafkachannelsResource, c.ns, name, data, subresources...), &v1alpha1.KafkaChannel{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.KafkaChannel), err +} diff --git a/contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1/fake/fake_messaging_client.go b/contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1/fake/fake_messaging_client.go new file mode 100644 index 00000000000..220a1e3cb47 --- /dev/null +++ b/contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1/fake/fake_messaging_client.go @@ -0,0 +1,40 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1alpha1 "github.com/knative/eventing/contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeMessagingV1alpha1 struct { + *testing.Fake +} + +func (c *FakeMessagingV1alpha1) KafkaChannels(namespace string) v1alpha1.KafkaChannelInterface { + return &FakeKafkaChannels{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *FakeMessagingV1alpha1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1/generated_expansion.go b/contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1/generated_expansion.go new file mode 100644 index 00000000000..5b2dec5a0f5 --- /dev/null +++ b/contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +type KafkaChannelExpansion interface{} diff --git a/contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1/kafkachannel.go b/contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1/kafkachannel.go new file mode 100644 index 00000000000..d7ad9d39cb7 --- /dev/null +++ b/contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1/kafkachannel.go @@ -0,0 +1,174 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "github.com/knative/eventing/contrib/kafka/pkg/apis/messaging/v1alpha1" + scheme "github.com/knative/eventing/contrib/kafka/pkg/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// KafkaChannelsGetter has a method to return a KafkaChannelInterface. +// A group's client should implement this interface. +type KafkaChannelsGetter interface { + KafkaChannels(namespace string) KafkaChannelInterface +} + +// KafkaChannelInterface has methods to work with KafkaChannel resources. 
+type KafkaChannelInterface interface { + Create(*v1alpha1.KafkaChannel) (*v1alpha1.KafkaChannel, error) + Update(*v1alpha1.KafkaChannel) (*v1alpha1.KafkaChannel, error) + UpdateStatus(*v1alpha1.KafkaChannel) (*v1alpha1.KafkaChannel, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.KafkaChannel, error) + List(opts v1.ListOptions) (*v1alpha1.KafkaChannelList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.KafkaChannel, err error) + KafkaChannelExpansion +} + +// kafkaChannels implements KafkaChannelInterface +type kafkaChannels struct { + client rest.Interface + ns string +} + +// newKafkaChannels returns a KafkaChannels +func newKafkaChannels(c *MessagingV1alpha1Client, namespace string) *kafkaChannels { + return &kafkaChannels{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the kafkaChannel, and returns the corresponding kafkaChannel object, and an error if there is any. +func (c *kafkaChannels) Get(name string, options v1.GetOptions) (result *v1alpha1.KafkaChannel, err error) { + result = &v1alpha1.KafkaChannel{} + err = c.client.Get(). + Namespace(c.ns). + Resource("kafkachannels"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of KafkaChannels that match those selectors. +func (c *kafkaChannels) List(opts v1.ListOptions) (result *v1alpha1.KafkaChannelList, err error) { + result = &v1alpha1.KafkaChannelList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("kafkachannels"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested kafkaChannels. 
+func (c *kafkaChannels) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("kafkachannels"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a kafkaChannel and creates it. Returns the server's representation of the kafkaChannel, and an error, if there is any. +func (c *kafkaChannels) Create(kafkaChannel *v1alpha1.KafkaChannel) (result *v1alpha1.KafkaChannel, err error) { + result = &v1alpha1.KafkaChannel{} + err = c.client.Post(). + Namespace(c.ns). + Resource("kafkachannels"). + Body(kafkaChannel). + Do(). + Into(result) + return +} + +// Update takes the representation of a kafkaChannel and updates it. Returns the server's representation of the kafkaChannel, and an error, if there is any. +func (c *kafkaChannels) Update(kafkaChannel *v1alpha1.KafkaChannel) (result *v1alpha1.KafkaChannel, err error) { + result = &v1alpha1.KafkaChannel{} + err = c.client.Put(). + Namespace(c.ns). + Resource("kafkachannels"). + Name(kafkaChannel.Name). + Body(kafkaChannel). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *kafkaChannels) UpdateStatus(kafkaChannel *v1alpha1.KafkaChannel) (result *v1alpha1.KafkaChannel, err error) { + result = &v1alpha1.KafkaChannel{} + err = c.client.Put(). + Namespace(c.ns). + Resource("kafkachannels"). + Name(kafkaChannel.Name). + SubResource("status"). + Body(kafkaChannel). + Do(). + Into(result) + return +} + +// Delete takes name of the kafkaChannel and deletes it. Returns an error if one occurs. +func (c *kafkaChannels) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("kafkachannels"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. 
+func (c *kafkaChannels) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("kafkachannels"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched kafkaChannel. +func (c *kafkaChannels) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.KafkaChannel, err error) { + result = &v1alpha1.KafkaChannel{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("kafkachannels"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1/messaging_client.go b/contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1/messaging_client.go new file mode 100644 index 00000000000..203bea4573b --- /dev/null +++ b/contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1/messaging_client.go @@ -0,0 +1,90 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + v1alpha1 "github.com/knative/eventing/contrib/kafka/pkg/apis/messaging/v1alpha1" + "github.com/knative/eventing/contrib/kafka/pkg/client/clientset/versioned/scheme" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + rest "k8s.io/client-go/rest" +) + +type MessagingV1alpha1Interface interface { + RESTClient() rest.Interface + KafkaChannelsGetter +} + +// MessagingV1alpha1Client is used to interact with features provided by the messaging.knative.dev group. +type MessagingV1alpha1Client struct { + restClient rest.Interface +} + +func (c *MessagingV1alpha1Client) KafkaChannels(namespace string) KafkaChannelInterface { + return newKafkaChannels(c, namespace) +} + +// NewForConfig creates a new MessagingV1alpha1Client for the given config. +func NewForConfig(c *rest.Config) (*MessagingV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &MessagingV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new MessagingV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *MessagingV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new MessagingV1alpha1Client for the given RESTClient. 
+func New(c rest.Interface) *MessagingV1alpha1Client { + return &MessagingV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *MessagingV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/contrib/kafka/pkg/client/informers/externalversions/factory.go b/contrib/kafka/pkg/client/informers/externalversions/factory.go new file mode 100644 index 00000000000..66992469f3f --- /dev/null +++ b/contrib/kafka/pkg/client/informers/externalversions/factory.go @@ -0,0 +1,180 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. 
+ +package externalversions + +import ( + reflect "reflect" + sync "sync" + time "time" + + versioned "github.com/knative/eventing/contrib/kafka/pkg/client/clientset/versioned" + internalinterfaces "github.com/knative/eventing/contrib/kafka/pkg/client/informers/externalversions/internalinterfaces" + messaging "github.com/knative/eventing/contrib/kafka/pkg/client/informers/externalversions/messaging" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// SharedInformerOption defines the functional option type for SharedInformerFactory. +type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory + +type sharedInformerFactory struct { + client versioned.Interface + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc + lock sync.Mutex + defaultResync time.Duration + customResync map[reflect.Type]time.Duration + + informers map[reflect.Type]cache.SharedIndexInformer + // startedInformers is used for tracking which informers have been started. + // This allows Start() to be called multiple times safely. + startedInformers map[reflect.Type]bool +} + +// WithCustomResyncConfig sets a custom resync period for the specified informer types. +func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + for k, v := range resyncConfig { + factory.customResync[reflect.TypeOf(k)] = v + } + return factory + } +} + +// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory. 
+func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.tweakListOptions = tweakListOptions + return factory + } +} + +// WithNamespace limits the SharedInformerFactory to the specified namespace. +func WithNamespace(namespace string) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.namespace = namespace + return factory + } +} + +// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. +func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync) +} + +// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory. +// Listers obtained via this SharedInformerFactory will be subject to the same filters +// as specified here. +// Deprecated: Please use NewSharedInformerFactoryWithOptions instead +func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions)) +} + +// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options. 
+func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory { + factory := &sharedInformerFactory{ + client: client, + namespace: v1.NamespaceAll, + defaultResync: defaultResync, + informers: make(map[reflect.Type]cache.SharedIndexInformer), + startedInformers: make(map[reflect.Type]bool), + customResync: make(map[reflect.Type]time.Duration), + } + + // Apply all options + for _, opt := range options { + factory = opt(factory) + } + + return factory +} + +// Start initializes all requested informers. +func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) { + f.lock.Lock() + defer f.lock.Unlock() + + for informerType, informer := range f.informers { + if !f.startedInformers[informerType] { + go informer.Run(stopCh) + f.startedInformers[informerType] = true + } + } +} + +// WaitForCacheSync waits for all started informers' cache were synced. +func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool { + informers := func() map[reflect.Type]cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informers := map[reflect.Type]cache.SharedIndexInformer{} + for informerType, informer := range f.informers { + if f.startedInformers[informerType] { + informers[informerType] = informer + } + } + return informers + }() + + res := map[reflect.Type]bool{} + for informType, informer := range informers { + res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced) + } + return res +} + +// InternalInformerFor returns the SharedIndexInformer for obj using an internal +// client. 
+func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informerType := reflect.TypeOf(obj) + informer, exists := f.informers[informerType] + if exists { + return informer + } + + resyncPeriod, exists := f.customResync[informerType] + if !exists { + resyncPeriod = f.defaultResync + } + + informer = newFunc(f.client, resyncPeriod) + f.informers[informerType] = informer + + return informer +} + +// SharedInformerFactory provides shared informers for resources in all known +// API group versions. +type SharedInformerFactory interface { + internalinterfaces.SharedInformerFactory + ForResource(resource schema.GroupVersionResource) (GenericInformer, error) + WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool + + Messaging() messaging.Interface +} + +func (f *sharedInformerFactory) Messaging() messaging.Interface { + return messaging.New(f, f.namespace, f.tweakListOptions) +} diff --git a/contrib/kafka/pkg/client/informers/externalversions/generic.go b/contrib/kafka/pkg/client/informers/externalversions/generic.go new file mode 100644 index 00000000000..761bf80c064 --- /dev/null +++ b/contrib/kafka/pkg/client/informers/externalversions/generic.go @@ -0,0 +1,62 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. 
+ +package externalversions + +import ( + "fmt" + + v1alpha1 "github.com/knative/eventing/contrib/kafka/pkg/apis/messaging/v1alpha1" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// GenericInformer is type of SharedIndexInformer which will locate and delegate to other +// sharedInformers based on type +type GenericInformer interface { + Informer() cache.SharedIndexInformer + Lister() cache.GenericLister +} + +type genericInformer struct { + informer cache.SharedIndexInformer + resource schema.GroupResource +} + +// Informer returns the SharedIndexInformer. +func (f *genericInformer) Informer() cache.SharedIndexInformer { + return f.informer +} + +// Lister returns the GenericLister. +func (f *genericInformer) Lister() cache.GenericLister { + return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource) +} + +// ForResource gives generic access to a shared informer of the matching type +// TODO extend this to unknown resources with a client pool +func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { + switch resource { + // Group=messaging.knative.dev, Version=v1alpha1 + case v1alpha1.SchemeGroupVersion.WithResource("kafkachannels"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Messaging().V1alpha1().KafkaChannels().Informer()}, nil + + } + + return nil, fmt.Errorf("no informer found for %v", resource) +} diff --git a/contrib/kafka/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go b/contrib/kafka/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go new file mode 100644 index 00000000000..644293f3f79 --- /dev/null +++ b/contrib/kafka/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -0,0 +1,38 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in 
compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package internalinterfaces + +import ( + time "time" + + versioned "github.com/knative/eventing/contrib/kafka/pkg/client/clientset/versioned" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + cache "k8s.io/client-go/tools/cache" +) + +type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer + +// SharedInformerFactory a small interface to allow for adding an informer without an import cycle +type SharedInformerFactory interface { + Start(stopCh <-chan struct{}) + InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer +} + +type TweakListOptionsFunc func(*v1.ListOptions) diff --git a/contrib/kafka/pkg/client/informers/externalversions/messaging/interface.go b/contrib/kafka/pkg/client/informers/externalversions/messaging/interface.go new file mode 100644 index 00000000000..2a2a4e5ecfc --- /dev/null +++ b/contrib/kafka/pkg/client/informers/externalversions/messaging/interface.go @@ -0,0 +1,46 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package messaging + +import ( + internalinterfaces "github.com/knative/eventing/contrib/kafka/pkg/client/informers/externalversions/internalinterfaces" + v1alpha1 "github.com/knative/eventing/contrib/kafka/pkg/client/informers/externalversions/messaging/v1alpha1" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1alpha1 provides access to shared informers for resources in V1alpha1. + V1alpha1() v1alpha1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1alpha1 returns a new v1alpha1.Interface. +func (g *group) V1alpha1() v1alpha1.Interface { + return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/contrib/kafka/pkg/client/informers/externalversions/messaging/v1alpha1/interface.go b/contrib/kafka/pkg/client/informers/externalversions/messaging/v1alpha1/interface.go new file mode 100644 index 00000000000..9e09a032739 --- /dev/null +++ b/contrib/kafka/pkg/client/informers/externalversions/messaging/v1alpha1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + internalinterfaces "github.com/knative/eventing/contrib/kafka/pkg/client/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // KafkaChannels returns a KafkaChannelInformer. + KafkaChannels() KafkaChannelInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// KafkaChannels returns a KafkaChannelInformer. +func (v *version) KafkaChannels() KafkaChannelInformer { + return &kafkaChannelInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/contrib/kafka/pkg/client/informers/externalversions/messaging/v1alpha1/kafkachannel.go b/contrib/kafka/pkg/client/informers/externalversions/messaging/v1alpha1/kafkachannel.go new file mode 100644 index 00000000000..ddd6ae98f8c --- /dev/null +++ b/contrib/kafka/pkg/client/informers/externalversions/messaging/v1alpha1/kafkachannel.go @@ -0,0 +1,89 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + time "time" + + messagingv1alpha1 "github.com/knative/eventing/contrib/kafka/pkg/apis/messaging/v1alpha1" + versioned "github.com/knative/eventing/contrib/kafka/pkg/client/clientset/versioned" + internalinterfaces "github.com/knative/eventing/contrib/kafka/pkg/client/informers/externalversions/internalinterfaces" + v1alpha1 "github.com/knative/eventing/contrib/kafka/pkg/client/listers/messaging/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// KafkaChannelInformer provides access to a shared informer and lister for +// KafkaChannels. +type KafkaChannelInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.KafkaChannelLister +} + +type kafkaChannelInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewKafkaChannelInformer constructs a new informer for KafkaChannel type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewKafkaChannelInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredKafkaChannelInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredKafkaChannelInformer constructs a new informer for KafkaChannel type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredKafkaChannelInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.MessagingV1alpha1().KafkaChannels(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.MessagingV1alpha1().KafkaChannels(namespace).Watch(options) + }, + }, + &messagingv1alpha1.KafkaChannel{}, + resyncPeriod, + indexers, + ) +} + +func (f *kafkaChannelInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredKafkaChannelInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *kafkaChannelInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&messagingv1alpha1.KafkaChannel{}, f.defaultInformer) +} + +func (f *kafkaChannelInformer) Lister() v1alpha1.KafkaChannelLister { + return v1alpha1.NewKafkaChannelLister(f.Informer().GetIndexer()) +} diff --git a/contrib/kafka/pkg/client/listers/messaging/v1alpha1/expansion_generated.go 
b/contrib/kafka/pkg/client/listers/messaging/v1alpha1/expansion_generated.go new file mode 100644 index 00000000000..d45c47feb0d --- /dev/null +++ b/contrib/kafka/pkg/client/listers/messaging/v1alpha1/expansion_generated.go @@ -0,0 +1,27 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +// KafkaChannelListerExpansion allows custom methods to be added to +// KafkaChannelLister. +type KafkaChannelListerExpansion interface{} + +// KafkaChannelNamespaceListerExpansion allows custom methods to be added to +// KafkaChannelNamespaceLister. +type KafkaChannelNamespaceListerExpansion interface{} diff --git a/contrib/kafka/pkg/client/listers/messaging/v1alpha1/kafkachannel.go b/contrib/kafka/pkg/client/listers/messaging/v1alpha1/kafkachannel.go new file mode 100644 index 00000000000..2401d9dbfe2 --- /dev/null +++ b/contrib/kafka/pkg/client/listers/messaging/v1alpha1/kafkachannel.go @@ -0,0 +1,94 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "github.com/knative/eventing/contrib/kafka/pkg/apis/messaging/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// KafkaChannelLister helps list KafkaChannels. +type KafkaChannelLister interface { + // List lists all KafkaChannels in the indexer. + List(selector labels.Selector) (ret []*v1alpha1.KafkaChannel, err error) + // KafkaChannels returns an object that can list and get KafkaChannels. + KafkaChannels(namespace string) KafkaChannelNamespaceLister + KafkaChannelListerExpansion +} + +// kafkaChannelLister implements the KafkaChannelLister interface. +type kafkaChannelLister struct { + indexer cache.Indexer +} + +// NewKafkaChannelLister returns a new KafkaChannelLister. +func NewKafkaChannelLister(indexer cache.Indexer) KafkaChannelLister { + return &kafkaChannelLister{indexer: indexer} +} + +// List lists all KafkaChannels in the indexer. +func (s *kafkaChannelLister) List(selector labels.Selector) (ret []*v1alpha1.KafkaChannel, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.KafkaChannel)) + }) + return ret, err +} + +// KafkaChannels returns an object that can list and get KafkaChannels. +func (s *kafkaChannelLister) KafkaChannels(namespace string) KafkaChannelNamespaceLister { + return kafkaChannelNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// KafkaChannelNamespaceLister helps list and get KafkaChannels. +type KafkaChannelNamespaceLister interface { + // List lists all KafkaChannels in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1alpha1.KafkaChannel, err error) + // Get retrieves the KafkaChannel from the indexer for a given namespace and name. 
+ Get(name string) (*v1alpha1.KafkaChannel, error) + KafkaChannelNamespaceListerExpansion +} + +// kafkaChannelNamespaceLister implements the KafkaChannelNamespaceLister +// interface. +type kafkaChannelNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all KafkaChannels in the indexer for a given namespace. +func (s kafkaChannelNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.KafkaChannel, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.KafkaChannel)) + }) + return ret, err +} + +// Get retrieves the KafkaChannel from the indexer for a given namespace and name. +func (s kafkaChannelNamespaceLister) Get(name string) (*v1alpha1.KafkaChannel, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("kafkachannel"), name) + } + return obj.(*v1alpha1.KafkaChannel), nil +} diff --git a/hack/update-codegen.sh b/hack/update-codegen.sh index b7e3fc9e7ab..d638b309d80 100755 --- a/hack/update-codegen.sh +++ b/hack/update-codegen.sh @@ -37,5 +37,18 @@ ${CODEGEN_PKG}/generate-groups.sh "deepcopy" \ "duck:v1alpha1" \ --go-header-file ${REPO_ROOT_DIR}/hack/boilerplate/boilerplate.go.txt +CONTRIB_DIRS=(contrib/kafka/pkg) + +for DIR in "${CONTRIB_DIRS[@]}"; do + # generate the code with: + # --output-base because this script should also be able to run inside the vendor dir of + # k8s.io/kubernetes. The output-base is needed for the generators to output into the vendor dir + # instead of the $GOPATH directly. For normal projects this can be dropped. 
+ ${CODEGEN_PKG}/generate-groups.sh "deepcopy,client,informer,lister" \ + github.com/knative/eventing/${DIR}/client github.com/knative/eventing/${DIR}/apis \ + "messaging:v1alpha1" \ + --go-header-file ${REPO_ROOT_DIR}/hack/boilerplate/boilerplate.go.txt +done + # Make sure our dependencies are up-to-date ${REPO_ROOT_DIR}/hack/update-deps.sh From 3d5e3db281efb923e485d6a18f9ec4c3edcead56 Mon Sep 17 00:00:00 2001 From: nachocano Date: Mon, 20 May 2019 13:39:03 -0700 Subject: [PATCH 03/64] configs --- contrib/kafka/cmd/channel_controller/main.go | 1 + contrib/kafka/cmd/channel_dispatcher/main.go | 1 + .../config/200-controller-clusterrole.yaml | 86 +++++++++++++++++ .../config/200-dispatcher-clusterrole.yaml | 36 ++++++++ .../kafka/config/200-dispatcher-service.yaml | 30 ++++++ contrib/kafka/config/200-serviceaccount.yaml | 26 ++++++ .../kafka/config/201-clusterrolebinding.yaml | 43 +++++++++ contrib/kafka/config/300-kafka-channel.yaml | 92 +++++++++++++++++++ contrib/kafka/config/500-controller.yaml | 52 +++++++++++ contrib/kafka/config/500-dispatcher.yaml | 44 +++++++++ 10 files changed, 411 insertions(+) create mode 100644 contrib/kafka/cmd/channel_controller/main.go create mode 100644 contrib/kafka/cmd/channel_dispatcher/main.go create mode 100644 contrib/kafka/config/200-controller-clusterrole.yaml create mode 100644 contrib/kafka/config/200-dispatcher-clusterrole.yaml create mode 100644 contrib/kafka/config/200-dispatcher-service.yaml create mode 100644 contrib/kafka/config/200-serviceaccount.yaml create mode 100644 contrib/kafka/config/201-clusterrolebinding.yaml create mode 100644 contrib/kafka/config/300-kafka-channel.yaml create mode 100644 contrib/kafka/config/500-controller.yaml create mode 100644 contrib/kafka/config/500-dispatcher.yaml diff --git a/contrib/kafka/cmd/channel_controller/main.go b/contrib/kafka/cmd/channel_controller/main.go new file mode 100644 index 00000000000..44ef369ba84 --- /dev/null +++ b/contrib/kafka/cmd/channel_controller/main.go 
@@ -0,0 +1 @@ +package channel_controller diff --git a/contrib/kafka/cmd/channel_dispatcher/main.go b/contrib/kafka/cmd/channel_dispatcher/main.go new file mode 100644 index 00000000000..976ac06a2ab --- /dev/null +++ b/contrib/kafka/cmd/channel_dispatcher/main.go @@ -0,0 +1 @@ +package channel_dispatcher diff --git a/contrib/kafka/config/200-controller-clusterrole.yaml b/contrib/kafka/config/200-controller-clusterrole.yaml new file mode 100644 index 00000000000..a4221f2da9d --- /dev/null +++ b/contrib/kafka/config/200-controller-clusterrole.yaml @@ -0,0 +1,86 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: kafka-controller +rules: + - apiGroups: + - messaging.knative.dev + resources: + - kafkachannels + - kafkachannels/status + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" # Core API group. + resources: + - services + - configmaps + verbs: + - get + - list + - watch + - create + - apiGroups: + - "" # Core API group. + resources: + - services + verbs: + - update + - apiGroups: + - "" # Core API Group. + resources: + - configmaps + resourceNames: + - kafka-dispatcher + verbs: + - update + - apiGroups: + - "" # Core API Group. + resources: + - events + verbs: + - create + - patch + - update + - apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - apiGroups: + - "" # Core API group. 
+ resources: + - endpoints + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - deployments + - deployments/status + verbs: + - get + - list + - watch diff --git a/contrib/kafka/config/200-dispatcher-clusterrole.yaml b/contrib/kafka/config/200-dispatcher-clusterrole.yaml new file mode 100644 index 00000000000..7095292ba99 --- /dev/null +++ b/contrib/kafka/config/200-dispatcher-clusterrole.yaml @@ -0,0 +1,36 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: kafka-dispatcher +rules: + - apiGroups: + - messaging.knative.dev + resources: + - kafkachannels + - kafkachannels/status + verbs: + - get + - list + - watch + - apiGroups: + - "" # Core API group. + resources: + - configmaps + verbs: + - get + - list + - watch diff --git a/contrib/kafka/config/200-dispatcher-service.yaml b/contrib/kafka/config/200-dispatcher-service.yaml new file mode 100644 index 00000000000..31172e49d65 --- /dev/null +++ b/contrib/kafka/config/200-dispatcher-service.yaml @@ -0,0 +1,30 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: Service +metadata: + name: kafka-dispatcher + namespace: knative-eventing + labels: + messaging.knative.dev/channel: kafka-channel + messaging.knative.dev/role: dispatcher +spec: + selector: + messaging.knative.dev/channel: kafka-channel + messaging.knative.dev/role: dispatcher + ports: + - port: 80 + protocol: TCP + targetPort: 8080 diff --git a/contrib/kafka/config/200-serviceaccount.yaml b/contrib/kafka/config/200-serviceaccount.yaml new file mode 100644 index 00000000000..5ba533cd4e4 --- /dev/null +++ b/contrib/kafka/config/200-serviceaccount.yaml @@ -0,0 +1,26 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kafka-controller + namespace: knative-eventing + +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kafka-dispatcher + namespace: knative-eventing diff --git a/contrib/kafka/config/201-clusterrolebinding.yaml b/contrib/kafka/config/201-clusterrolebinding.yaml new file mode 100644 index 00000000000..8b22194151f --- /dev/null +++ b/contrib/kafka/config/201-clusterrolebinding.yaml @@ -0,0 +1,43 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: kafka-controller +subjects: + - kind: ServiceAccount + name: kafka-controller + namespace: knative-eventing +roleRef: + kind: ClusterRole + name: kafka-controller + apiGroup: rbac.authorization.k8s.io + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: kafka-dispatcher + namespace: knative-eventing +subjects: + - kind: ServiceAccount + name: kafka-dispatcher + namespace: knative-eventing +roleRef: + kind: ClusterRole + name: kafka-dispatcher + apiGroup: rbac.authorization.k8s.io + diff --git a/contrib/kafka/config/300-kafka-channel.yaml b/contrib/kafka/config/300-kafka-channel.yaml new file mode 100644 index 00000000000..74d72de01a7 --- /dev/null +++ b/contrib/kafka/config/300-kafka-channel.yaml @@ -0,0 +1,92 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: kafkachannels.messaging.knative.dev + labels: + knative.dev/crd-install: "true" +spec: + group: messaging.knative.dev + version: v1alpha1 + names: + kind: KafkaChannel + plural: kafkachannels + singular: kafkachannel + categories: + - all + - knative + - messaging + shortNames: + - kc + scope: Namespaced + subresources: + status: {} + additionalPrinterColumns: + - name: Ready + type: string + JSONPath: ".status.conditions[?(@.type==\"Ready\")].status" + - name: Reason + type: string + JSONPath: ".status.conditions[?(@.type==\"Ready\")].reason" + - name: Hostname + type: string + JSONPath: .status.address.hostname + - name: Age + type: date + JSONPath: .metadata.creationTimestamp + validation: + openAPIV3Schema: + properties: + spec: + properties: + subscribable: + type: object + properties: + subscribers: + type: array + items: + required: + - uid + properties: + ref: + type: object + required: + - namespace + - name + - uid + properties: + apiVersion: + type: string + kind: + type: string + name: + type: string + minLength: 1 + namespace: + type: string + minLength: 1 + uid: + type: string + minLength: 1 + uid: + type: string + minLength: 1 + subscriberURI: + type: string + minLength: 1 + replyURI: + type: string + minLength: 1 diff --git a/contrib/kafka/config/500-controller.yaml b/contrib/kafka/config/500-controller.yaml new file mode 100644 index 00000000000..e05c2293b41 --- /dev/null +++ b/contrib/kafka/config/500-controller.yaml @@ -0,0 +1,52 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kafka-controller + namespace: knative-eventing +spec: + replicas: 1 + selector: + matchLabels: &labels + messaging.knative.dev/channel: kafka-channel + messaging.knative.dev/role: controller + template: + metadata: + labels: *labels + spec: + serviceAccountName: kafka-controller + containers: + - name: controller + image: github.com/knative/eventing/contrib/kafka/cmd/channel_controller + env: + - name: CONFIG_LOGGING_NAME + value: config-logging + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumeMounts: + - name: config-logging + mountPath: /etc/config-logging + - name: config-channel + mountPath: /etc/config-channel + volumes: + - name: config-logging + configMap: + name: config-logging + - name: config-channel + configMap: + name: kafka-channel-config diff --git a/contrib/kafka/config/500-dispatcher.yaml b/contrib/kafka/config/500-dispatcher.yaml new file mode 100644 index 00000000000..73c2723a298 --- /dev/null +++ b/contrib/kafka/config/500-dispatcher.yaml @@ -0,0 +1,44 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kafka-dispatcher + namespace: knative-eventing +spec: + replicas: 1 + selector: + matchLabels: &labels + messaging.knative.dev/channel: kafka-channel + messaging.knative.dev/role: dispatcher + template: + metadata: + labels: *labels + spec: + serviceAccountName: kafka-dispatcher + containers: + - name: dispatcher + image: github.com/knative/eventing/contrib/kafka/cmd/channel_dispatcher + env: + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumeMounts: + - name: config-channel + mountPath: /etc/config-channel + volumes: + - name: config-channel + configMap: + name: kafka-channel-config From ecbff88f9f10df70f4ea86e614f56e6a46626f09 Mon Sep 17 00:00:00 2001 From: nachocano Date: Mon, 20 May 2019 13:56:23 -0700 Subject: [PATCH 04/64] more stuff --- contrib/kafka/cmd/channel_controller/main.go | 183 +++++++++++++++++- .../pkg/reconciler/controller/kafkachannel.go | 1 + .../pkg/reconciler/dispatcher/kafkachannel.go | 1 + 3 files changed, 184 insertions(+), 1 deletion(-) create mode 100644 contrib/kafka/pkg/reconciler/controller/kafkachannel.go create mode 100644 contrib/kafka/pkg/reconciler/dispatcher/kafkachannel.go diff --git a/contrib/kafka/cmd/channel_controller/main.go b/contrib/kafka/cmd/channel_controller/main.go index 44ef369ba84..e3b5ee91094 100644 --- a/contrib/kafka/cmd/channel_controller/main.go +++ b/contrib/kafka/cmd/channel_controller/main.go @@ -1 +1,182 @@ -package channel_controller +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "flag" + "github.com/kelseyhightower/envconfig" + corev1 "k8s.io/api/core/v1" + "log" + // Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters). + // _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + + clientset "github.com/knative/eventing/contrib/kafka/pkg/client/clientset/versioned" + informers "github.com/knative/eventing/contrib/kafka/pkg/client/informers/externalversions" + "github.com/knative/eventing/contrib/kafka/pkg/reconciler/controller" + "github.com/knative/eventing/pkg/logconfig" + "github.com/knative/eventing/pkg/logging" + "github.com/knative/eventing/pkg/reconciler" + "github.com/knative/pkg/configmap" + kncontroller "github.com/knative/pkg/controller" + "github.com/knative/pkg/signals" + "github.com/knative/pkg/system" + "go.uber.org/zap" + kubeinformers "k8s.io/client-go/informers" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" +) + +const ( + dispatcherDeploymentName = "kafka-dispatcher" + dispatcherServiceName = "kafka-dispatcher" +) + +var ( + hardcodedLoggingConfig = flag.Bool("hardCodedLoggingConfig", false, "If true, use the hard coded logging config. It is intended to be used only when debugging outside a Kubernetes cluster.") + masterURL = flag.String("master", "", "The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster.") + kubeconfig = flag.String("kubeconfig", "", "Path to a kubeconfig. 
Only required if out-of-cluster.") +) + +func main() { + flag.Parse() + logger, atomicLevel := setupLogger() + defer logger.Sync() + + // set up signals so we handle the first shutdown signal gracefully + stopCh := signals.SetupSignalHandler() + + cfg, err := clientcmd.BuildConfigFromFlags(*masterURL, *kubeconfig) + if err != nil { + logger.Fatalw("Error building kubeconfig", zap.Error(err)) + } + + logger = logger.With(zap.String("controller/impl", "pkg")) + logger.Info("Starting the Kafka controller") + + systemNS := system.Namespace() + + const numControllers = 1 + cfg.QPS = numControllers * rest.DefaultQPS + cfg.Burst = numControllers * rest.DefaultBurst + opt := reconciler.NewOptionsOrDie(cfg, logger, stopCh) + // Setting up our own eventingClientSet as we need the messaging API introduced with kafka. + eventingClientSet := clientset.NewForConfigOrDie(cfg) + + kubeInformerFactory := kubeinformers.NewSharedInformerFactory(opt.KubeClientSet, opt.ResyncPeriod) + eventingInformerFactory := informers.NewSharedInformerFactory(eventingClientSet, opt.ResyncPeriod) + + // Messaging + kafkaChannelInformer := eventingInformerFactory.Messaging().V1alpha1().KafkaChannels() + + // Kube + serviceInformer := kubeInformerFactory.Core().V1().Services() + endpointsInformer := kubeInformerFactory.Core().V1().Endpoints() + deploymentInformer := kubeInformerFactory.Apps().V1().Deployments() + + // Build all of our controllers, with the clients constructed above. + // Add new controllers to this array. + // You also need to modify numControllers above to match this. + controllers := [...]*kncontroller.Impl{ + controller.NewController( + opt, + eventingClientSet, + systemNS, + dispatcherDeploymentName, + dispatcherServiceName, + pubSubChannelInformer, + deploymentInformer, + serviceInformer, + endpointsInformer, + ), + } + // This line asserts at compile time that the length of controllers is equal to numControllers. 
+ // It is based on https://go101.org/article/tips.html#assert-at-compile-time, which notes that + // var _ [N-M]int + // asserts at compile time that N >= M, which we can use to establish equality of N and M: + // (N >= M) && (M >= N) => (N == M) + var _ [numControllers - len(controllers)][len(controllers) - numControllers]int + + // Watch the logging config map and dynamically update logging levels. + opt.ConfigMapWatcher.Watch(logconfig.ConfigMapName(), logging.UpdateLevelFromConfigMap(logger, atomicLevel, logconfig.Controller)) + // TODO: Watch the observability config map and dynamically update metrics exporter. + //opt.ConfigMapWatcher.Watch(metrics.ObservabilityConfigName, metrics.UpdateExporterFromConfigMap(component, logger)) + if err := opt.ConfigMapWatcher.Start(stopCh); err != nil { + logger.Fatalw("failed to start configuration manager", zap.Error(err)) + } + + // Start all of the informers and wait for them to sync. + logger.Info("Starting informers.") + if err := kncontroller.StartInformers( + stopCh, + // Messaging + pubSubChannelInformer.Informer(), + + // Kube + serviceInformer.Informer(), + deploymentInformer.Informer(), + endpointsInformer.Informer(), + ); err != nil { + logger.Fatalf("Failed to start informers: %v", err) + } + + logger.Info("Starting controllers.") + kncontroller.StartAll(stopCh, controllers[:]...) +} + +func setupLogger() (*zap.SugaredLogger, zap.AtomicLevel) { + // Set up our logger. 
+ loggingConfigMap := getLoggingConfigOrDie() + loggingConfig, err := logging.NewConfigFromMap(loggingConfigMap) + if err != nil { + log.Fatalf("Error parsing logging configuration: %v", err) + } + return logging.NewLoggerFromConfig(loggingConfig, logconfig.Controller) +} + +func getLoggingConfigOrDie() map[string]string { + if hardcodedLoggingConfig != nil && *hardcodedLoggingConfig { + return map[string]string{ + "loglevel.controller": "info", + "zap-logger-config": ` + { + "level": "info", + "development": false, + "outputPaths": ["stdout"], + "errorOutputPaths": ["stderr"], + "encoding": "json", + "encoderConfig": { + "timeKey": "ts", + "levelKey": "level", + "nameKey": "logger", + "callerKey": "caller", + "messageKey": "msg", + "stacktraceKey": "stacktrace", + "lineEnding": "", + "levelEncoder": "", + "timeEncoder": "iso8601", + "durationEncoder": "", + "callerEncoder": "" + }`, + } + } else { + cm, err := configmap.Load("/etc/config-logging") + if err != nil { + log.Fatalf("Error loading logging configuration: %v", err) + } + return cm + } +} diff --git a/contrib/kafka/pkg/reconciler/controller/kafkachannel.go b/contrib/kafka/pkg/reconciler/controller/kafkachannel.go new file mode 100644 index 00000000000..b0b429f8999 --- /dev/null +++ b/contrib/kafka/pkg/reconciler/controller/kafkachannel.go @@ -0,0 +1 @@ +package controller diff --git a/contrib/kafka/pkg/reconciler/dispatcher/kafkachannel.go b/contrib/kafka/pkg/reconciler/dispatcher/kafkachannel.go new file mode 100644 index 00000000000..909218d5017 --- /dev/null +++ b/contrib/kafka/pkg/reconciler/dispatcher/kafkachannel.go @@ -0,0 +1 @@ +package dispatcher From e843960bcf965dae2dceea48629cae956a237c3b Mon Sep 17 00:00:00 2001 From: nachocano Date: Mon, 20 May 2019 14:10:13 -0700 Subject: [PATCH 05/64] compiling controller --- contrib/kafka/cmd/channel_controller/main.go | 6 +- .../pkg/reconciler/controller/kafkachannel.go | 329 ++++++++++++++++++ 2 files changed, 331 insertions(+), 4 deletions(-) diff 
--git a/contrib/kafka/cmd/channel_controller/main.go b/contrib/kafka/cmd/channel_controller/main.go index e3b5ee91094..cfca19296b0 100644 --- a/contrib/kafka/cmd/channel_controller/main.go +++ b/contrib/kafka/cmd/channel_controller/main.go @@ -18,8 +18,6 @@ package main import ( "flag" - "github.com/kelseyhightower/envconfig" - corev1 "k8s.io/api/core/v1" "log" // Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters). // _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" @@ -97,7 +95,7 @@ func main() { systemNS, dispatcherDeploymentName, dispatcherServiceName, - pubSubChannelInformer, + kafkaChannelInformer, deploymentInformer, serviceInformer, endpointsInformer, @@ -123,7 +121,7 @@ func main() { if err := kncontroller.StartInformers( stopCh, // Messaging - pubSubChannelInformer.Informer(), + kafkaChannelInformer.Informer(), // Kube serviceInformer.Informer(), diff --git a/contrib/kafka/pkg/reconciler/controller/kafkachannel.go b/contrib/kafka/pkg/reconciler/controller/kafkachannel.go index b0b429f8999..466e2b964c0 100644 --- a/contrib/kafka/pkg/reconciler/controller/kafkachannel.go +++ b/contrib/kafka/pkg/reconciler/controller/kafkachannel.go @@ -1 +1,330 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + package controller + +import ( + "context" + "reflect" + "time" + + "github.com/knative/eventing/contrib/kafka/pkg/apis/messaging/v1alpha1" + "github.com/knative/eventing/contrib/kafka/pkg/client/clientset/versioned" + messaginginformers "github.com/knative/eventing/contrib/kafka/pkg/client/informers/externalversions/messaging/v1alpha1" + listers "github.com/knative/eventing/contrib/kafka/pkg/client/listers/messaging/v1alpha1" + "github.com/knative/eventing/pkg/logging" + "github.com/knative/eventing/pkg/reconciler" + "github.com/knative/pkg/controller" + "go.uber.org/zap" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + apierrs "k8s.io/apimachinery/pkg/api/errors" + appsv1informers "k8s.io/client-go/informers/apps/v1" + corev1informers "k8s.io/client-go/informers/core/v1" + appsv1listers "k8s.io/client-go/listers/apps/v1" + corev1listers "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/cache" +) + +const ( + // ReconcilerName is the name of the reconciler. + ReconcilerName = "KafkaChannels" + + // controllerAgentName is the string used by this controller to identify + // itself when creating events. + controllerAgentName = "kafka-controller" + + finalizerName = controllerAgentName + + // Name of the corev1.Events emitted from the reconciliation process. + channelReconciled = "ChannelReconciled" + channelReconcileFailed = "ChannelReconcileFailed" + channelUpdateStatusFailed = "ChannelUpdateStatusFailed" +) + +// Reconciler reconciles Kafka Channels. 
+type Reconciler struct { + *reconciler.Base + + dispatcherNamespace string + dispatcherDeploymentName string + dispatcherServiceName string + + eventingClientSet *versioned.Clientset + kafkachannelLister listers.KafkaChannelLister + kafkachannelInformer cache.SharedIndexInformer + deploymentLister appsv1listers.DeploymentLister + serviceLister corev1listers.ServiceLister + endpointsLister corev1listers.EndpointsLister + impl *controller.Impl +} + +var ( + deploymentGVK = appsv1.SchemeGroupVersion.WithKind("Deployment") + serviceGVK = corev1.SchemeGroupVersion.WithKind("Service") +) + +// Check that our Reconciler implements controller.Reconciler. +var _ controller.Reconciler = (*Reconciler)(nil) + +// Check that our Reconciler implements cache.ResourceEventHandler +var _ cache.ResourceEventHandler = (*Reconciler)(nil) + +// NewController initializes the controller and is called by the generated code. +// Registers event handlers to enqueue events. +func NewController( + opt reconciler.Options, + eventingClientSet *versioned.Clientset, + dispatcherNamespace string, + dispatcherDeploymentName string, + dispatcherServiceName string, + kafkachannelInformer messaginginformers.KafkaChannelInformer, + deploymentInformer appsv1informers.DeploymentInformer, + serviceInformer corev1informers.ServiceInformer, + endpointsInformer corev1informers.EndpointsInformer, +) *controller.Impl { + + r := &Reconciler{ + Base: reconciler.NewBase(opt, controllerAgentName), + dispatcherNamespace: dispatcherNamespace, + dispatcherDeploymentName: dispatcherDeploymentName, + dispatcherServiceName: dispatcherServiceName, + eventingClientSet: eventingClientSet, + kafkachannelLister: kafkachannelInformer.Lister(), + kafkachannelInformer: kafkachannelInformer.Informer(), + deploymentLister: deploymentInformer.Lister(), + serviceLister: serviceInformer.Lister(), + endpointsLister: endpointsInformer.Lister(), + } + r.impl = controller.NewImpl(r, r.Logger, ReconcilerName, 
reconciler.MustNewStatsReporter(ReconcilerName, r.Logger)) + + r.Logger.Info("Setting up event handlers") + kafkachannelInformer.Informer().AddEventHandler(reconciler.Handler(r.impl.Enqueue)) + + // Set up watches for dispatcher resources we care about, since any changes to these + // resources will affect our Channels. So, set up a watch here, that will cause + // a global Resync for all the channels to take stock of their health when these change. + deploymentInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{ + FilterFunc: controller.FilterWithNameAndNamespace(dispatcherNamespace, dispatcherDeploymentName), + Handler: r, + }) + serviceInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{ + FilterFunc: controller.FilterWithNameAndNamespace(dispatcherNamespace, dispatcherServiceName), + Handler: r, + }) + endpointsInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{ + FilterFunc: controller.FilterWithNameAndNamespace(dispatcherNamespace, dispatcherServiceName), + Handler: r, + }) + return r.impl +} + +// cache.ResourceEventHandler implementation. +// These 3 functions just cause a Global Resync of the channels, because any changes here +// should be reflected onto the channels. +func (r *Reconciler) OnAdd(obj interface{}) { + r.impl.GlobalResync(r.kafkachannelInformer) +} + +func (r *Reconciler) OnUpdate(old, new interface{}) { + r.impl.GlobalResync(r.kafkachannelInformer) +} + +func (r *Reconciler) OnDelete(obj interface{}) { + r.impl.GlobalResync(r.kafkachannelInformer) +} + +// Reconcile compares the actual state with the desired, and attempts to +// converge the two. It then updates the Status block of the KafkaChannel resource +// with the current status of the resource. +func (r *Reconciler) Reconcile(ctx context.Context, key string) error { + // Convert the namespace/name string into a distinct namespace and name. 
+	namespace, name, err := cache.SplitMetaNamespaceKey(key)
+	if err != nil {
+		logging.FromContext(ctx).Error("invalid resource key")
+		return nil
+	}
+
+	// Get the KafkaChannel resource with this namespace/name.
+	original, err := r.kafkachannelLister.KafkaChannels(namespace).Get(name)
+	if apierrs.IsNotFound(err) {
+		// The resource may no longer exist, in which case we stop processing.
+		logging.FromContext(ctx).Error("KafkaChannel key in work queue no longer exists")
+		return nil
+	} else if err != nil {
+		return err
+	}
+
+	// Don't modify the informers copy.
+	channel := original.DeepCopy()
+
+	// Reconcile this copy of the KafkaChannel and then write back any status updates regardless of
+	// whether the reconcile errored out.
+	reconcileErr := r.reconcile(ctx, channel)
+	if reconcileErr != nil {
+		logging.FromContext(ctx).Error("Error reconciling KafkaChannel", zap.Error(reconcileErr))
+		r.Recorder.Eventf(channel, corev1.EventTypeWarning, channelReconcileFailed, "KafkaChannel reconciliation failed: %v", reconcileErr)
+	} else {
+		logging.FromContext(ctx).Debug("KafkaChannel reconciled")
+		r.Recorder.Event(channel, corev1.EventTypeNormal, channelReconciled, "KafkaChannel reconciled")
+	}
+
+	if _, updateStatusErr := r.updateStatus(ctx, channel); updateStatusErr != nil {
+		logging.FromContext(ctx).Error("Failed to update KafkaChannel status", zap.Error(updateStatusErr))
+		r.Recorder.Eventf(channel, corev1.EventTypeWarning, channelUpdateStatusFailed, "Failed to update KafkaChannel's status: %v", updateStatusErr)
+		return updateStatusErr
+	}
+
+	// Requeue if the resource is not ready
+	return reconcileErr
+}
+
+func (r *Reconciler) reconcile(ctx context.Context, kc *v1alpha1.KafkaChannel) error {
+	kc.Status.InitializeConditions()
+
+	if kc.DeletionTimestamp != nil {
+		// Everything is cleaned up by the garbage collector.
+		return nil
+	}
+
+	// We reconcile the status of the Channel by looking at:
+	// 1. Dispatcher Deployment for its readiness.
+ // 2. Dispatcher k8s Service for it's existence. + // 3. Dispatcher endpoints to ensure that there's something backing the Service. + // 4. k8s service representing the channel that will use ExternalName to point to the Dispatcher k8s service + + // Get the Dispatcher Deployment and propagate the status to the Channel + //d, err := r.deploymentLister.Deployments(r.dispatcherNamespace).Get(r.dispatcherDeploymentName) + //if err != nil { + // if apierrs.IsNotFound(err) { + // imc.Status.MarkDispatcherFailed("DispatcherDeploymentDoesNotExist", "Dispatcher Deployment does not exist") + // } else { + // logging.FromContext(ctx).Error("Unable to get the dispatcher Deployment", zap.Error(err)) + // imc.Status.MarkDispatcherFailed("DispatcherDeploymentGetFailed", "Failed to get dispatcher Deployment") + // } + // return err + //} + //imc.Status.PropagateDispatcherStatus(&d.Status) + // + //// Get the Dispatcher Service and propagate the status to the Channel in case it does not exist. + //// We don't do anything with the service because it's status contains nothing useful, so just do + //// an existence check. Then below we check the endpoints targeting it. + //_, err = r.serviceLister.Services(r.dispatcherNamespace).Get(r.dispatcherServiceName) + //if err != nil { + // if apierrs.IsNotFound(err) { + // imc.Status.MarkServiceFailed("DispatcherServiceDoesNotExist", "Dispatcher Service does not exist") + // } else { + // logging.FromContext(ctx).Error("Unable to get the dispatcher service", zap.Error(err)) + // imc.Status.MarkServiceFailed("DispatcherServiceGetFailed", "Failed to get dispatcher service") + // } + // return err + //} + // + //imc.Status.MarkServiceTrue() + // + //// Get the Dispatcher Service Endpoints and propagate the status to the Channel + //// endpoints has the same name as the service, so not a bug. 
+ //e, err := r.endpointsLister.Endpoints(r.dispatcherNamespace).Get(r.dispatcherServiceName) + //if err != nil { + // if apierrs.IsNotFound(err) { + // imc.Status.MarkEndpointsFailed("DispatcherEndpointsDoesNotExist", "Dispatcher Endpoints does not exist") + // } else { + // logging.FromContext(ctx).Error("Unable to get the dispatcher endpoints", zap.Error(err)) + // imc.Status.MarkEndpointsFailed("DispatcherEndpointsGetFailed", "Failed to get dispatcher endpoints") + // } + // return err + //} + // + //if len(e.Subsets) == 0 { + // logging.FromContext(ctx).Error("No endpoints found for Dispatcher service", zap.Error(err)) + // imc.Status.MarkEndpointsFailed("DispatcherEndpointsNotReady", "There are no endpoints ready for Dispatcher service") + // return errors.New("there are no endpoints ready for Dispatcher service") + //} + // + //imc.Status.MarkEndpointsTrue() + // + //// Reconcile the k8s service representing the actual Channel. It points to the Dispatcher service via + //// ExternalName + //svc, err := r.reconcileChannelService(ctx, imc) + //if err != nil { + // imc.Status.MarkChannelServiceFailed("ChannelServiceFailed", fmt.Sprintf("Channel Service failed: %s", err)) + // return err + //} + //imc.Status.MarkChannelServiceTrue() + //imc.Status.SetAddress(fmt.Sprintf("%s.%s.svc.%s", svc.Name, svc.Namespace, utils.GetClusterDomainName())) + + // Ok, so now the Dispatcher Deployment & Service have been created, we're golden since the + // dispatcher watches the Channel and where it needs to dispatch events to. + return nil +} + +//func (r *Reconciler) reconcileChannelService(ctx context.Context, imc *v1alpha1.InMemoryChannel) (*corev1.Service, error) { +// // Get the Service and propagate the status to the Channel in case it does not exist. +// // We don't do anything with the service because it's status contains nothing useful, so just do +// // an existence check. Then below we check the endpoints targeting it. 
+// // We may change this name later, so we have to ensure we use proper addressable when resolving these. +// svc, err := r.serviceLister.Services(imc.Namespace).Get(fmt.Sprintf("%s-kn-channel", imc.Name)) +// if err != nil { +// if apierrs.IsNotFound(err) { +// svc, err = resources.NewK8sService(imc, resources.ExternalService(r.dispatcherNamespace, r.dispatcherServiceName)) +// if err != nil { +// logging.FromContext(ctx).Error("failed to create the channel service object", zap.Error(err)) +// return nil, err +// } +// svc, err = r.KubeClientSet.CoreV1().Services(imc.Namespace).Create(svc) +// if err != nil { +// logging.FromContext(ctx).Error("failed to create the channel service", zap.Error(err)) +// return nil, err +// } +// return svc, nil +// } else { +// logging.FromContext(ctx).Error("Unable to get the channel service", zap.Error(err)) +// } +// return nil, err +// } +// +// // Check to make sure that our IMC owns this service and if not, complain. +// if !metav1.IsControlledBy(svc, imc) { +// return nil, fmt.Errorf("inmemorychannel: %s/%s does not own Service: %q", imc.Namespace, imc.Name, svc.Name) +// } +// return svc, nil +//} + +func (r *Reconciler) updateStatus(ctx context.Context, desired *v1alpha1.KafkaChannel) (*v1alpha1.KafkaChannel, error) { + kc, err := r.kafkachannelLister.KafkaChannels(desired.Namespace).Get(desired.Name) + if err != nil { + return nil, err + } + + if reflect.DeepEqual(kc.Status, desired.Status) { + return kc, nil + } + + becomesReady := desired.Status.IsReady() && !kc.Status.IsReady() + + // Don't modify the informers copy. 
+ existing := kc.DeepCopy() + existing.Status = desired.Status + + new, err := r.eventingClientSet.MessagingV1alpha1().KafkaChannels(desired.Namespace).UpdateStatus(existing) + if err == nil && becomesReady { + duration := time.Since(new.ObjectMeta.CreationTimestamp.Time) + r.Logger.Infof("KafkaChannel %q became ready after %v", kc.Name, duration) + // TODO: stats + } + + return new, err +} From 63365ebdaea303a0c524b7e6546218e9d315d8a1 Mon Sep 17 00:00:00 2001 From: nachocano Date: Mon, 20 May 2019 15:03:59 -0700 Subject: [PATCH 06/64] updating channel spec --- contrib/kafka/cmd/channel_dispatcher/main.go | 2 +- contrib/kafka/config/300-kafka-channel.yaml | 18 ++++++++++++++++ .../messaging/v1alpha1/kafka_channel_types.go | 21 +++++++++++++++++-- 3 files changed, 38 insertions(+), 3 deletions(-) diff --git a/contrib/kafka/cmd/channel_dispatcher/main.go b/contrib/kafka/cmd/channel_dispatcher/main.go index 976ac06a2ab..06ab7d0f9a3 100644 --- a/contrib/kafka/cmd/channel_dispatcher/main.go +++ b/contrib/kafka/cmd/channel_dispatcher/main.go @@ -1 +1 @@ -package channel_dispatcher +package main diff --git a/contrib/kafka/config/300-kafka-channel.yaml b/contrib/kafka/config/300-kafka-channel.yaml index 74d72de01a7..8fbbae0a57a 100644 --- a/contrib/kafka/config/300-kafka-channel.yaml +++ b/contrib/kafka/config/300-kafka-channel.yaml @@ -52,11 +52,26 @@ spec: properties: spec: properties: + bootstrapServers: + type: string + description: "Comma-separated list of the Broker URL of the Kafka cluster, which is in the format of `my-cluster-kafka-bootstrap.my-kafka-namespace:9092`." + minLength: 1 + consumerMode: + type: string + description: "Mode used to dispatch events from different partitions in parallel. `multiplex` and `partitions` are valid values. If not set, `multiplex` is used." + minLength: 1 + numPartitions: + type: int32 + description: "Number of partitions of a Kafka topic." + replicationFactor: + type: int16 + description: "Replication factor of a Kafka topic." 
subscribable: type: object properties: subscribers: type: array + description: "The list of subscribers that have expressed interest in receiving events from this channel." items: required: - uid @@ -90,3 +105,6 @@ spec: replyURI: type: string minLength: 1 + required: + - bootstrapServers + - consumerMode diff --git a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_types.go b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_types.go index 402071d19bd..63067b52fe9 100644 --- a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_types.go +++ b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_types.go @@ -50,9 +50,26 @@ var _ apis.Defaultable = (*KafkaChannel)(nil) var _ runtime.Object = (*KafkaChannel)(nil) var _ webhook.GenericCRD = (*KafkaChannel)(nil) -// KafkaChannelSpec defines which subscribers have expressed interest in -// receiving events from this KafkaChannel. +// KafkaChannelSpec defines the specification for a KafkaChannel. type KafkaChannelSpec struct { + // Comma-separated list of the Broker URL of the Kafka cluster, which is in the format + // of my-cluster-kafka-bootstrap.my-kafka-namespace:9092. + BootstrapServers string `json:"bootstrapServers"` + + // ConsumerMode is the mode used to dispatch events from different partitions in parallel. + // By default, partitions are multiplexed with a single go channel (multiplex). + // `multiplex` and `partitions` are valid values. + // +optional + ConsumerMode string `json:"consumerMode,omitempty"` + + // NumPartitions is the number of partitions of a Kafka topic. + // +optional + NumPartitions int32 `json:"numPartitions,omitempty"` + + //ReplicationFactor is the replication factor of a Kafka topic. + // +optional + ReplicationFactor int16 `json:"replicationFactor,omitempty"` + // Channel conforms to Duck type Subscribable. 
Subscribable *eventingduck.Subscribable `json:"subscribable,omitempty"` } From 677f02d699d21913d8c59aa358536a114c1925c5 Mon Sep 17 00:00:00 2001 From: nachocano Date: Mon, 20 May 2019 15:46:16 -0700 Subject: [PATCH 07/64] moving types down --- .../v1alpha1/kafka_channel_defaults.go | 15 ++- .../v1alpha1/kafka_channel_defaults_test.go | 102 ++++++++++++++++++ .../messaging/v1alpha1/kafka_channel_types.go | 13 +-- .../v1alpha1/kafka_channel_validation.go | 13 +++ .../v1alpha1/kafka_channel_validation_test.go | 2 +- .../kafka/pkg/controller/channel/provider.go | 5 +- .../kafka/pkg/controller/channel/reconcile.go | 11 +- contrib/kafka/pkg/controller/provider.go | 1 + .../kafka/pkg/controller/reconcile_test.go | 1 + contrib/kafka/pkg/controller/types.go | 8 -- contrib/kafka/pkg/controller/util.go | 12 +-- contrib/kafka/pkg/controller/util_test.go | 4 +- contrib/kafka/pkg/reconciler/types.go | 19 ++++ 13 files changed, 168 insertions(+), 38 deletions(-) create mode 100644 contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_defaults_test.go delete mode 100644 contrib/kafka/pkg/controller/types.go create mode 100644 contrib/kafka/pkg/reconciler/types.go diff --git a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_defaults.go b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_defaults.go index 54851449fa9..f8299d9d085 100644 --- a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_defaults.go +++ b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_defaults.go @@ -16,12 +16,23 @@ limitations under the License. package v1alpha1 -import "context" +import ( + "context" + . "github.com/knative/eventing/contrib/kafka/pkg/reconciler" +) func (c *KafkaChannel) SetDefaults(ctx context.Context) { c.Spec.SetDefaults(ctx) } func (cs *KafkaChannelSpec) SetDefaults(ctx context.Context) { - // TODO: Nothing to default here... 
+ if cs.ConsumerMode == "" { + cs.ConsumerMode = ConsumerModeMultiplexConsumerValue + } + if cs.NumPartitions == 0 { + cs.NumPartitions = DefaultNumPartitions + } + if cs.ReplicationFactor == 0 { + cs.ReplicationFactor = DefaultReplicationFactor + } } diff --git a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_defaults_test.go b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_defaults_test.go new file mode 100644 index 00000000000..27e7e6a4690 --- /dev/null +++ b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_defaults_test.go @@ -0,0 +1,102 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "context" + "testing" + + "github.com/google/go-cmp/cmp" + . 
"github.com/knative/eventing/contrib/kafka/pkg/reconciler" +) + +const ( + testNumPartitions = 10 + testReplicationFactor = 5 +) + +func TestKafkaChannelDefaults(t *testing.T) { + testCases := map[string]struct { + initial KafkaChannel + expected KafkaChannel + }{ + "nil spec": { + initial: KafkaChannel{}, + expected: KafkaChannel{ + Spec: KafkaChannelSpec{ + ConsumerMode: ConsumerModeMultiplexConsumerValue, + NumPartitions: DefaultNumPartitions, + ReplicationFactor: DefaultReplicationFactor, + }, + }, + }, + "consumerMode empty": { + initial: KafkaChannel{ + Spec: KafkaChannelSpec{ + ConsumerMode: "", + NumPartitions: testNumPartitions, + ReplicationFactor: testReplicationFactor, + }, + }, + expected: KafkaChannel{ + Spec: KafkaChannelSpec{ + ConsumerMode: ConsumerModeMultiplexConsumerValue, + NumPartitions: testNumPartitions, + ReplicationFactor: testReplicationFactor, + }, + }, + }, + "numPartitions not set": { + initial: KafkaChannel{ + Spec: KafkaChannelSpec{ + ConsumerMode: ConsumerModeMultiplexConsumerValue, + ReplicationFactor: testReplicationFactor, + }, + }, + expected: KafkaChannel{ + Spec: KafkaChannelSpec{ + ConsumerMode: ConsumerModeMultiplexConsumerValue, + NumPartitions: DefaultNumPartitions, + ReplicationFactor: testReplicationFactor, + }, + }, + }, + "replicationFactor not set": { + initial: KafkaChannel{ + Spec: KafkaChannelSpec{ + ConsumerMode: ConsumerModeMultiplexConsumerValue, + NumPartitions: testNumPartitions, + }, + }, + expected: KafkaChannel{ + Spec: KafkaChannelSpec{ + ConsumerMode: ConsumerModeMultiplexConsumerValue, + NumPartitions: testNumPartitions, + ReplicationFactor: DefaultReplicationFactor, + }, + }, + }, + } + for n, tc := range testCases { + t.Run(n, func(t *testing.T) { + tc.initial.SetDefaults(context.TODO()) + if diff := cmp.Diff(tc.expected, tc.initial); diff != "" { + t.Fatalf("Unexpected defaults (-want, +got): %s", diff) + } + }) + } +} diff --git a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_types.go 
b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_types.go index 63067b52fe9..33ffa4d250b 100644 --- a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_types.go +++ b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_types.go @@ -59,16 +59,13 @@ type KafkaChannelSpec struct { // ConsumerMode is the mode used to dispatch events from different partitions in parallel. // By default, partitions are multiplexed with a single go channel (multiplex). // `multiplex` and `partitions` are valid values. - // +optional - ConsumerMode string `json:"consumerMode,omitempty"` + ConsumerMode string `json:"consumerMode"` - // NumPartitions is the number of partitions of a Kafka topic. - // +optional - NumPartitions int32 `json:"numPartitions,omitempty"` + // NumPartitions is the number of partitions of a Kafka topic. By default, it is set to 1. + NumPartitions int32 `json:"numPartitions"` - //ReplicationFactor is the replication factor of a Kafka topic. - // +optional - ReplicationFactor int16 `json:"replicationFactor,omitempty"` + //ReplicationFactor is the replication factor of a Kafka topic. By default, it is set to 1. + ReplicationFactor int16 `json:"replicationFactor"` // Channel conforms to Duck type Subscribable. Subscribable *eventingduck.Subscribable `json:"subscribable,omitempty"` diff --git a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation.go b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation.go index fa5c4e03fba..a3e9d811cd6 100644 --- a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation.go +++ b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation.go @@ -19,6 +19,7 @@ package v1alpha1 import ( "context" "fmt" + . 
"github.com/knative/eventing/contrib/kafka/pkg/reconciler" "github.com/knative/pkg/apis" ) @@ -30,6 +31,18 @@ func (c *KafkaChannel) Validate(ctx context.Context) *apis.FieldError { func (cs *KafkaChannelSpec) Validate(ctx context.Context) *apis.FieldError { var errs *apis.FieldError + if cs.BootstrapServers == "" { + fe := apis.ErrMissingField("bootstrapServers") + errs = errs.Also(fe) + } + if cs.ConsumerMode == "" { + fe := apis.ErrMissingField("consumerMode") + errs = errs.Also(fe) + } else if cs.ConsumerMode != ConsumerModePartitionConsumerValue && cs.ConsumerMode != ConsumerModeMultiplexConsumerValue { + fe := apis.ErrInvalidValue(cs.ConsumerMode, "consumerMode") + errs = errs.Also(fe) + } + if cs.Subscribable != nil { for i, subscriber := range cs.Subscribable.Subscribers { if subscriber.ReplyURI == "" && subscriber.SubscriberURI == "" { diff --git a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation_test.go b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation_test.go index b49a7d7847a..69415b762cb 100644 --- a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation_test.go +++ b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation_test.go @@ -23,7 +23,7 @@ import ( "github.com/knative/pkg/apis" ) -func TestGooglePubSubChannelValidation(t *testing.T) { +func TestKafkaChannelValidation(t *testing.T) { tests := []CRDTest{{ name: "empty", cr: &KafkaChannel{ diff --git a/contrib/kafka/pkg/controller/channel/provider.go b/contrib/kafka/pkg/controller/channel/provider.go index 73eab2e8d22..ee5407ea90c 100644 --- a/contrib/kafka/pkg/controller/channel/provider.go +++ b/contrib/kafka/pkg/controller/channel/provider.go @@ -30,6 +30,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/source" common "github.com/knative/eventing/contrib/kafka/pkg/controller" + . 
"github.com/knative/eventing/contrib/kafka/pkg/reconciler" eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" "github.com/knative/pkg/system" ) @@ -51,7 +52,7 @@ type reconciler struct { client client.Client recorder record.EventRecorder logger *zap.Logger - config *common.KafkaProvisionerConfig + config *KafkaProvisionerConfig // Using a shared kafkaClusterAdmin does not work currently because of an issue with // Shopify/sarama, see https://github.com/Shopify/sarama/issues/1162. kafkaClusterAdmin sarama.ClusterAdmin @@ -61,7 +62,7 @@ type reconciler struct { var _ reconcile.Reconciler = &reconciler{} // ProvideController returns a Channel controller. -func ProvideController(mgr manager.Manager, config *common.KafkaProvisionerConfig, logger *zap.Logger) (controller.Controller, error) { +func ProvideController(mgr manager.Manager, config *KafkaProvisionerConfig, logger *zap.Logger) (controller.Controller, error) { // Setup a new controller to Reconcile Channel. c, err := controller.New(controllerAgentName, mgr, controller.Options{ Reconciler: &reconciler{ diff --git a/contrib/kafka/pkg/controller/channel/reconcile.go b/contrib/kafka/pkg/controller/channel/reconcile.go index 34a7e1c9b71..20538c6de7e 100644 --- a/contrib/kafka/pkg/controller/channel/reconcile.go +++ b/contrib/kafka/pkg/controller/channel/reconcile.go @@ -23,12 +23,13 @@ import ( "github.com/Shopify/sarama" "go.uber.org/zap" - v1 "k8s.io/api/core/v1" + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/knative/eventing/contrib/kafka/pkg/controller" + . 
"github.com/knative/eventing/contrib/kafka/pkg/reconciler" eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" util "github.com/knative/eventing/pkg/provisioners" topicUtils "github.com/knative/eventing/pkg/provisioners/utils" @@ -38,12 +39,6 @@ import ( const ( finalizerName = controllerAgentName - // DefaultNumPartitions defines the default number of partitions - DefaultNumPartitions = 1 - - // DefaultReplicationFactor defines the default number of replications - DefaultReplicationFactor = 1 - // Name of the corev1.Events emitted from the reconciliation process dispatcherReconcileFailed = "DispatcherReconcileFailed" dispatcherUpdateStatusFailed = "DispatcherUpdateStatusFailed" @@ -246,7 +241,7 @@ func (r *reconciler) getClusterChannelProvisioner() (*eventingv1alpha1.ClusterCh return clusterChannelProvisioner, nil } -func createKafkaAdminClient(config *controller.KafkaProvisionerConfig) (sarama.ClusterAdmin, error) { +func createKafkaAdminClient(config *KafkaProvisionerConfig) (sarama.ClusterAdmin, error) { saramaConf := sarama.NewConfig() saramaConf.Version = sarama.V1_1_0_0 saramaConf.ClientID = controllerAgentName diff --git a/contrib/kafka/pkg/controller/provider.go b/contrib/kafka/pkg/controller/provider.go index 0f6ca5631f2..092c6c01dfb 100644 --- a/contrib/kafka/pkg/controller/provider.go +++ b/contrib/kafka/pkg/controller/provider.go @@ -27,6 +27,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" + . 
"github.com/knative/eventing/contrib/kafka/pkg/reconciler" "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" ) diff --git a/contrib/kafka/pkg/controller/reconcile_test.go b/contrib/kafka/pkg/controller/reconcile_test.go index 6e69b6aa57a..c7508c8a0fc 100644 --- a/contrib/kafka/pkg/controller/reconcile_test.go +++ b/contrib/kafka/pkg/controller/reconcile_test.go @@ -21,6 +21,7 @@ import ( "fmt" "testing" + . "github.com/knative/eventing/contrib/kafka/pkg/reconciler" eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" "github.com/knative/eventing/pkg/provisioners" controllertesting "github.com/knative/eventing/pkg/reconciler/testing" diff --git a/contrib/kafka/pkg/controller/types.go b/contrib/kafka/pkg/controller/types.go deleted file mode 100644 index f0384e28402..00000000000 --- a/contrib/kafka/pkg/controller/types.go +++ /dev/null @@ -1,8 +0,0 @@ -package controller - -import cluster "github.com/bsm/sarama-cluster" - -type KafkaProvisionerConfig struct { - Brokers []string - ConsumerMode cluster.ConsumerMode -} diff --git a/contrib/kafka/pkg/controller/util.go b/contrib/kafka/pkg/controller/util.go index 7b3a56d448e..5450f890f2d 100644 --- a/contrib/kafka/pkg/controller/util.go +++ b/contrib/kafka/pkg/controller/util.go @@ -5,17 +5,15 @@ import ( "log" "strings" - cluster "github.com/bsm/sarama-cluster" - + "github.com/bsm/sarama-cluster" + . "github.com/knative/eventing/contrib/kafka/pkg/reconciler" "github.com/knative/pkg/configmap" ) const ( - BrokerConfigMapKey = "bootstrap_servers" - ConsumerModeConfigMapKey = "consumer_mode" - ConsumerModePartitionConsumerValue = "partitions" - ConsumerModeMultiplexConsumerValue = "multiplex" - KafkaChannelSeparator = "." + BrokerConfigMapKey = "bootstrap_servers" + ConsumerModeConfigMapKey = "consumer_mode" + KafkaChannelSeparator = "." 
) // GetProvisionerConfig returns the details of the associated ClusterChannelProvisioner object diff --git a/contrib/kafka/pkg/controller/util_test.go b/contrib/kafka/pkg/controller/util_test.go index 526f183bd86..8ab18d63855 100644 --- a/contrib/kafka/pkg/controller/util_test.go +++ b/contrib/kafka/pkg/controller/util_test.go @@ -6,9 +6,9 @@ import ( "path/filepath" "testing" - cluster "github.com/bsm/sarama-cluster" - + "github.com/bsm/sarama-cluster" "github.com/google/go-cmp/cmp" + . "github.com/knative/eventing/contrib/kafka/pkg/reconciler" _ "github.com/knative/pkg/system/testing" ) diff --git a/contrib/kafka/pkg/reconciler/types.go b/contrib/kafka/pkg/reconciler/types.go new file mode 100644 index 00000000000..58a4b21e7b6 --- /dev/null +++ b/contrib/kafka/pkg/reconciler/types.go @@ -0,0 +1,19 @@ +package reconciler + +import cluster "github.com/bsm/sarama-cluster" + +const ( + ConsumerModePartitionConsumerValue = "partitions" + ConsumerModeMultiplexConsumerValue = "multiplex" + + // DefaultNumPartitions defines the default number of partitions + DefaultNumPartitions = 1 + + // DefaultReplicationFactor defines the default number of replications + DefaultReplicationFactor = 1 +) + +type KafkaProvisionerConfig struct { + Brokers []string + ConsumerMode cluster.ConsumerMode +} From 01e0efc26ab54dede269a4868f9f06b974d37dca Mon Sep 17 00:00:00 2001 From: nachocano Date: Mon, 20 May 2019 16:20:46 -0700 Subject: [PATCH 08/64] updates to UTs --- contrib/kafka/cmd/controller/main.go | 19 +- contrib/kafka/config/500-dispatcher.yaml | 1 + .../messaging/v1alpha1/crd_validation_test.go | 43 ----- .../v1alpha1/kafka_channel_defaults.go | 4 +- .../v1alpha1/kafka_channel_defaults_test.go | 32 ++++ .../v1alpha1/kafka_channel_lifecycle.go | 28 +-- .../messaging/v1alpha1/kafka_channel_types.go | 28 +-- .../v1alpha1/kafka_channel_validation.go | 11 ++ .../v1alpha1/kafka_channel_validation_test.go | 179 +++++++++++++----- contrib/kafka/pkg/reconciler/types.go | 18 +- 10 files 
changed, 240 insertions(+), 123 deletions(-) delete mode 100644 contrib/kafka/pkg/apis/messaging/v1alpha1/crd_validation_test.go diff --git a/contrib/kafka/cmd/controller/main.go b/contrib/kafka/cmd/controller/main.go index 96d500834e7..041a0a3079d 100644 --- a/contrib/kafka/cmd/controller/main.go +++ b/contrib/kafka/cmd/controller/main.go @@ -1,3 +1,19 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package main import ( @@ -9,6 +25,7 @@ import ( provisionerController "github.com/knative/eventing/contrib/kafka/pkg/controller" "github.com/knative/eventing/contrib/kafka/pkg/controller/channel" + . "github.com/knative/eventing/contrib/kafka/pkg/reconciler" eventingv1alpha "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" "github.com/knative/eventing/pkg/provisioners" "go.uber.org/zap" @@ -24,7 +41,7 @@ import ( type SchemeFunc func(*runtime.Scheme) error // ProvideFunc adds a controller to a Manager. 
-type ProvideFunc func(mgr manager.Manager, config *provisionerController.KafkaProvisionerConfig, logger *zap.Logger) (controller.Controller, error) +type ProvideFunc func(mgr manager.Manager, config *KafkaProvisionerConfig, logger *zap.Logger) (controller.Controller, error) func main() { os.Exit(_main()) diff --git a/contrib/kafka/config/500-dispatcher.yaml b/contrib/kafka/config/500-dispatcher.yaml index 73c2723a298..baa074ff16d 100644 --- a/contrib/kafka/config/500-dispatcher.yaml +++ b/contrib/kafka/config/500-dispatcher.yaml @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + apiVersion: apps/v1 kind: Deployment metadata: diff --git a/contrib/kafka/pkg/apis/messaging/v1alpha1/crd_validation_test.go b/contrib/kafka/pkg/apis/messaging/v1alpha1/crd_validation_test.go deleted file mode 100644 index 557b0f87987..00000000000 --- a/contrib/kafka/pkg/apis/messaging/v1alpha1/crd_validation_test.go +++ /dev/null @@ -1,43 +0,0 @@ -/* -Copyright 2019 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1alpha1 - -import ( - "context" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/knative/pkg/apis" - "github.com/knative/pkg/webhook" -) - -type CRDTest struct { - name string - cr webhook.GenericCRD - want *apis.FieldError -} - -func doValidateTest(t *testing.T, tests []CRDTest) { - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - got := test.cr.Validate(context.TODO()) - if diff := cmp.Diff(test.want.Error(), got.Error()); diff != "" { - t.Errorf("%s: validate (-want, +got) = %v", test.name, diff) - } - }) - } -} diff --git a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_defaults.go b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_defaults.go index f8299d9d085..0aa0909ad24 100644 --- a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_defaults.go +++ b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_defaults.go @@ -29,10 +29,10 @@ func (cs *KafkaChannelSpec) SetDefaults(ctx context.Context) { if cs.ConsumerMode == "" { cs.ConsumerMode = ConsumerModeMultiplexConsumerValue } - if cs.NumPartitions == 0 { + if cs.NumPartitions <= 0 { cs.NumPartitions = DefaultNumPartitions } - if cs.ReplicationFactor == 0 { + if cs.ReplicationFactor <= 0 { cs.ReplicationFactor = DefaultReplicationFactor } } diff --git a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_defaults_test.go b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_defaults_test.go index 27e7e6a4690..601f6acbbaf 100644 --- a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_defaults_test.go +++ b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_defaults_test.go @@ -75,6 +75,22 @@ func TestKafkaChannelDefaults(t *testing.T) { }, }, }, + "numPartitions negative": { + initial: KafkaChannel{ + Spec: KafkaChannelSpec{ + ConsumerMode: ConsumerModeMultiplexConsumerValue, + ReplicationFactor: testReplicationFactor, + NumPartitions: -10, + }, + }, + expected: KafkaChannel{ + Spec: KafkaChannelSpec{ + ConsumerMode: 
ConsumerModeMultiplexConsumerValue, + NumPartitions: DefaultNumPartitions, + ReplicationFactor: testReplicationFactor, + }, + }, + }, "replicationFactor not set": { initial: KafkaChannel{ Spec: KafkaChannelSpec{ @@ -90,6 +106,22 @@ func TestKafkaChannelDefaults(t *testing.T) { }, }, }, + "replicationFactor negative": { + initial: KafkaChannel{ + Spec: KafkaChannelSpec{ + ConsumerMode: ConsumerModeMultiplexConsumerValue, + NumPartitions: testNumPartitions, + ReplicationFactor: -10, + }, + }, + expected: KafkaChannel{ + Spec: KafkaChannelSpec{ + ConsumerMode: ConsumerModeMultiplexConsumerValue, + NumPartitions: testNumPartitions, + ReplicationFactor: DefaultReplicationFactor, + }, + }, + }, } for n, tc := range testCases { t.Run(n, func(t *testing.T) { diff --git a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_lifecycle.go b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_lifecycle.go index 41be5d5d268..3acb6c4dd15 100644 --- a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_lifecycle.go +++ b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_lifecycle.go @@ -1,18 +1,18 @@ /* - * Copyright 2019 The Knative Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ package v1alpha1 diff --git a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_types.go b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_types.go index 33ffa4d250b..1cfe1cbdf51 100644 --- a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_types.go +++ b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_types.go @@ -1,18 +1,18 @@ /* - * Copyright 2019 The Knative Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ package v1alpha1 diff --git a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation.go b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation.go index a3e9d811cd6..6f96db27866 100644 --- a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation.go +++ b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation.go @@ -35,6 +35,7 @@ func (cs *KafkaChannelSpec) Validate(ctx context.Context) *apis.FieldError { fe := apis.ErrMissingField("bootstrapServers") errs = errs.Also(fe) } + if cs.ConsumerMode == "" { fe := apis.ErrMissingField("consumerMode") errs = errs.Also(fe) @@ -43,6 +44,16 @@ func (cs *KafkaChannelSpec) Validate(ctx context.Context) *apis.FieldError { errs = errs.Also(fe) } + if cs.NumPartitions < 0 { + fe := apis.ErrInvalidValue(cs.NumPartitions, "numPartitions") + errs = errs.Also(fe) + } + + if cs.ReplicationFactor < 0 { + fe := apis.ErrInvalidValue(cs.ReplicationFactor, "replicationFactor") + errs = errs.Also(fe) + } + if cs.Subscribable != nil { for i, subscriber := range cs.Subscribable.Subscribers { if subscriber.ReplyURI == "" && subscriber.SubscriberURI == "" { diff --git a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation_test.go b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation_test.go index 69415b762cb..754595f0bfd 100644 --- a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation_test.go +++ b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation_test.go @@ -17,6 +17,9 @@ limitations under the License. 
package v1alpha1 import ( + "context" + "github.com/google/go-cmp/cmp" + "github.com/knative/pkg/webhook" "testing" eventingduck "github.com/knative/eventing/pkg/apis/duck/v1alpha1" @@ -24,60 +27,140 @@ import ( ) func TestKafkaChannelValidation(t *testing.T) { - tests := []CRDTest{{ - name: "empty", - cr: &KafkaChannel{ - Spec: KafkaChannelSpec{}, + testCases := map[string]struct { + cr webhook.GenericCRD + want *apis.FieldError + }{ + "empty spec": { + cr: &KafkaChannel{ + Spec: KafkaChannelSpec{}, + }, + want: func() *apis.FieldError { + fe := apis.ErrMissingField("spec.bootstrapServers, spec.consumerMode") + return fe + }(), + }, + "empty bootstrapServers": { + cr: &KafkaChannel{ + Spec: KafkaChannelSpec{ + ConsumerMode: "multiplex", + }, + }, + want: func() *apis.FieldError { + fe := apis.ErrMissingField("spec.bootstrapServers") + return fe + }(), + }, + "empty consumerMode": { + cr: &KafkaChannel{ + Spec: KafkaChannelSpec{ + BootstrapServers: "bootstrap_srvs", + }, + }, + want: func() *apis.FieldError { + fe := apis.ErrMissingField("spec.consumerMode") + return fe + }(), + }, + "invalid consumerMode": { + cr: &KafkaChannel{ + Spec: KafkaChannelSpec{ + ConsumerMode: "invalid_value", + BootstrapServers: "bootstrap_srvs", + }, + }, + want: func() *apis.FieldError { + fe := apis.ErrInvalidValue("invalid_value", "spec.consumerMode") + return fe + }(), }, - want: nil, - }, { - name: "valid subscribers array", - cr: &KafkaChannel{ - Spec: KafkaChannelSpec{ - Subscribable: &eventingduck.Subscribable{ - Subscribers: []eventingduck.ChannelSubscriberSpec{{ - SubscriberURI: "subscriberendpoint", - ReplyURI: "resultendpoint", + "negative numPartitions": { + cr: &KafkaChannel{ + Spec: KafkaChannelSpec{ + ConsumerMode: "multiplex", + BootstrapServers: "bootstrap_srvs", + NumPartitions: -10, + }, + }, + want: func() *apis.FieldError { + fe := apis.ErrInvalidValue(-10, "spec.numPartitions") + return fe + }(), + }, + "negative replicationFactor": { + cr: &KafkaChannel{ + Spec: 
KafkaChannelSpec{ + ConsumerMode: "multiplex", + BootstrapServers: "bootstrap_srvs", + ReplicationFactor: -10, + }, + }, + want: func() *apis.FieldError { + fe := apis.ErrInvalidValue(-10, "spec.replicationFactor") + return fe + }(), + }, + "valid subscribers array": { + cr: &KafkaChannel{ + Spec: KafkaChannelSpec{ + ConsumerMode: "multiplex", + BootstrapServers: "bootstrap_srvs", + Subscribable: &eventingduck.Subscribable{ + Subscribers: []eventingduck.ChannelSubscriberSpec{{ + SubscriberURI: "subscriberendpoint", + ReplyURI: "resultendpoint", + }}, }}, - }}, + }, + want: nil, }, - want: nil, - }, { - name: "empty subscriber at index 1", - cr: &KafkaChannel{ - Spec: KafkaChannelSpec{ - Subscribable: &eventingduck.Subscribable{ - Subscribers: []eventingduck.ChannelSubscriberSpec{{ - SubscriberURI: "subscriberendpoint", - ReplyURI: "replyendpoint", - }, {}}, - }}, + "empty subscriber at index 1": { + cr: &KafkaChannel{ + Spec: KafkaChannelSpec{ + ConsumerMode: "multiplex", + BootstrapServers: "bootstrap_srvs", + Subscribable: &eventingduck.Subscribable{ + Subscribers: []eventingduck.ChannelSubscriberSpec{{ + SubscriberURI: "subscriberendpoint", + ReplyURI: "replyendpoint", + }, {}}, + }}, + }, + want: func() *apis.FieldError { + fe := apis.ErrMissingField("spec.subscribable.subscriber[1].replyURI", "spec.subscribable.subscriber[1].subscriberURI") + fe.Details = "expected at least one of, got none" + return fe + }(), }, - want: func() *apis.FieldError { - fe := apis.ErrMissingField("spec.subscribable.subscriber[1].replyURI", "spec.subscribable.subscriber[1].subscriberURI") - fe.Details = "expected at least one of, got none" - return fe - }(), - }, { - name: "2 empty subscribers", - cr: &KafkaChannel{ - Spec: KafkaChannelSpec{ - Subscribable: &eventingduck.Subscribable{ - Subscribers: []eventingduck.ChannelSubscriberSpec{{}, {}}, + "two empty subscribers": { + cr: &KafkaChannel{ + Spec: KafkaChannelSpec{ + ConsumerMode: "multiplex", + BootstrapServers: 
"bootstrap_srvs", + Subscribable: &eventingduck.Subscribable{ + Subscribers: []eventingduck.ChannelSubscriberSpec{{}, {}}, + }, }, }, + want: func() *apis.FieldError { + var errs *apis.FieldError + fe := apis.ErrMissingField("spec.subscribable.subscriber[0].replyURI", "spec.subscribable.subscriber[0].subscriberURI") + fe.Details = "expected at least one of, got none" + errs = errs.Also(fe) + fe = apis.ErrMissingField("spec.subscribable.subscriber[1].replyURI", "spec.subscribable.subscriber[1].subscriberURI") + fe.Details = "expected at least one of, got none" + errs = errs.Also(fe) + return errs + }(), }, - want: func() *apis.FieldError { - var errs *apis.FieldError - fe := apis.ErrMissingField("spec.subscribable.subscriber[0].replyURI", "spec.subscribable.subscriber[0].subscriberURI") - fe.Details = "expected at least one of, got none" - errs = errs.Also(fe) - fe = apis.ErrMissingField("spec.subscribable.subscriber[1].replyURI", "spec.subscribable.subscriber[1].subscriberURI") - fe.Details = "expected at least one of, got none" - errs = errs.Also(fe) - return errs - }(), - }} + } - doValidateTest(t, tests) + for n, test := range testCases { + t.Run(n, func(t *testing.T) { + got := test.cr.Validate(context.Background()) + if diff := cmp.Diff(test.want.Error(), got.Error()); diff != "" { + t.Errorf("%s: validate (-want, +got) = %v", n, diff) + } + }) + } } diff --git a/contrib/kafka/pkg/reconciler/types.go b/contrib/kafka/pkg/reconciler/types.go index 58a4b21e7b6..fea84c8b0db 100644 --- a/contrib/kafka/pkg/reconciler/types.go +++ b/contrib/kafka/pkg/reconciler/types.go @@ -1,6 +1,22 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package reconciler -import cluster "github.com/bsm/sarama-cluster" +import "github.com/bsm/sarama-cluster" const ( ConsumerModePartitionConsumerValue = "partitions" From c3086529da8a645be20c6f23b755cf97ff1c4808 Mon Sep 17 00:00:00 2001 From: nachocano Date: Mon, 20 May 2019 16:29:12 -0700 Subject: [PATCH 09/64] adding validation that none is empty --- .../messaging/v1alpha1/kafka_channel_validation.go | 9 +++++++++ .../v1alpha1/kafka_channel_validation_test.go | 12 ++++++++++++ 2 files changed, 21 insertions(+) diff --git a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation.go b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation.go index 6f96db27866..010c4c94434 100644 --- a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation.go +++ b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation.go @@ -20,6 +20,7 @@ import ( "context" "fmt" . 
"github.com/knative/eventing/contrib/kafka/pkg/reconciler" + "strings" "github.com/knative/pkg/apis" ) @@ -34,6 +35,14 @@ func (cs *KafkaChannelSpec) Validate(ctx context.Context) *apis.FieldError { if cs.BootstrapServers == "" { fe := apis.ErrMissingField("bootstrapServers") errs = errs.Also(fe) + } else { + bootstrapServers := strings.Split(cs.BootstrapServers, ",") + for i, s := range bootstrapServers { + if len(s) == 0 { + fe := apis.ErrMissingField(fmt.Sprintf("bootstrapServers[%d]", i)) + errs = errs.Also(fe) + } + } } if cs.ConsumerMode == "" { diff --git a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation_test.go b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation_test.go index 754595f0bfd..f836c9161fc 100644 --- a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation_test.go +++ b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation_test.go @@ -51,6 +51,18 @@ func TestKafkaChannelValidation(t *testing.T) { return fe }(), }, + "empty bootstrapServer at index 1": { + cr: &KafkaChannel{ + Spec: KafkaChannelSpec{ + BootstrapServers: "bootstrap_srvs,", + ConsumerMode: "multiplex", + }, + }, + want: func() *apis.FieldError { + fe := apis.ErrMissingField("spec.bootstrapServers[1]") + return fe + }(), + }, "empty consumerMode": { cr: &KafkaChannel{ Spec: KafkaChannelSpec{ From 7023499720188cc49b5c7cccf1b3e2ca7fd716df Mon Sep 17 00:00:00 2001 From: Nacho Cano Date: Mon, 20 May 2019 23:05:39 -0700 Subject: [PATCH 10/64] more updates --- .../pkg/reconciler/controller/kafkachannel.go | 204 +++++++++++++++--- .../reconciler/controller/resources/client.go | 33 +++ .../controller/resources/service.go | 95 ++++++++ .../controller/resources/service_test.go | 143 ++++++++++++ .../reconciler/controller/resources/topic.go | 10 + 5 files changed, 453 insertions(+), 32 deletions(-) create mode 100644 contrib/kafka/pkg/reconciler/controller/resources/client.go create mode 100644 
contrib/kafka/pkg/reconciler/controller/resources/service.go create mode 100644 contrib/kafka/pkg/reconciler/controller/resources/service_test.go create mode 100644 contrib/kafka/pkg/reconciler/controller/resources/topic.go diff --git a/contrib/kafka/pkg/reconciler/controller/kafkachannel.go b/contrib/kafka/pkg/reconciler/controller/kafkachannel.go index 466e2b964c0..d3e6c94604a 100644 --- a/contrib/kafka/pkg/reconciler/controller/kafkachannel.go +++ b/contrib/kafka/pkg/reconciler/controller/kafkachannel.go @@ -18,13 +18,19 @@ package controller import ( "context" + "encoding/json" + "fmt" + "github.com/knative/eventing/pkg/reconciler/names" "reflect" + "strings" "time" + "github.com/Shopify/sarama" "github.com/knative/eventing/contrib/kafka/pkg/apis/messaging/v1alpha1" "github.com/knative/eventing/contrib/kafka/pkg/client/clientset/versioned" messaginginformers "github.com/knative/eventing/contrib/kafka/pkg/client/informers/externalversions/messaging/v1alpha1" listers "github.com/knative/eventing/contrib/kafka/pkg/client/listers/messaging/v1alpha1" + "github.com/knative/eventing/contrib/kafka/pkg/reconciler/controller/resources" "github.com/knative/eventing/pkg/logging" "github.com/knative/eventing/pkg/reconciler" "github.com/knative/pkg/controller" @@ -32,6 +38,9 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" apierrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" appsv1informers "k8s.io/client-go/informers/apps/v1" corev1informers "k8s.io/client-go/informers/core/v1" appsv1listers "k8s.io/client-go/listers/apps/v1" @@ -63,6 +72,10 @@ type Reconciler struct { dispatcherDeploymentName string dispatcherServiceName string + // Using a shared kafkaClusterAdmin does not work currently because of an issue with + // Shopify/sarama, see https://github.com/Shopify/sarama/issues/1162. 
+ kafkaClusterAdmin sarama.ClusterAdmin + eventingClientSet *versioned.Clientset kafkachannelLister listers.KafkaChannelLister kafkachannelInformer cache.SharedIndexInformer @@ -193,13 +206,57 @@ func (r *Reconciler) Reconcile(ctx context.Context, key string) error { } func (r *Reconciler) reconcile(ctx context.Context, kc *v1alpha1.KafkaChannel) error { + logger := logging.FromContext(ctx) + kc.Status.InitializeConditions() + kafkaClusterAdmin, err := r.createClient(ctx, kc) + if err != nil { + logger.Error("Unable to build kafka admin client", zap.String("channel", kc.Name), zap.Error(err)) + return err + } + + // See if the channel has been deleted. if kc.DeletionTimestamp != nil { - // Everything is cleaned up by the garbage collector. + if err := r.deleteTopic(ctx, kc, kafkaClusterAdmin); err != nil { + return err + } + removeFinalizer(kc) return nil } + // If we are adding the finalizer for the first time, then ensure that finalizer is persisted + // before manipulating Kafka. + if err := r.ensureFinalizer(kc); err != nil { + return err + } + + if err := r.createTopic(ctx, kc, kafkaClusterAdmin); err != nil { + // kc.Status.MarkNotProvisioned("NotProvisioned", "error while provisioning: %s", err) + return err + } + + // Reconcile the k8s service representing the actual Channel. It points to the Dispatcher service via + // ExternalName + svc, err := r.reconcileChannelService(ctx, kc) + if err != nil { + kc.Status.MarkChannelServiceFailed("ChannelServiceFailed", fmt.Sprintf("Channel Service failed: %s", err)) + return err + } + kc.Status.MarkChannelServiceTrue() + kc.Status.SetAddress(names.ServiceHostName(svc.Name, svc.Namespace)) + + // close the connection + err = kafkaClusterAdmin.Close() + if err != nil { + logger.Error("Error closing the connection", zap.Error(err)) + return err + } + + // Ok, so now the Dispatcher Deployment & Service have been created, we're golden since the + // dispatcher watches the Channel and where it needs to dispatch events to. 
+ return nil + // We reconcile the status of the Channel by looking at: // 1. Dispatcher Deployment for it's readiness. // 2. Dispatcher k8s Service for it's existence. @@ -271,37 +328,37 @@ func (r *Reconciler) reconcile(ctx context.Context, kc *v1alpha1.KafkaChannel) e return nil } -//func (r *Reconciler) reconcileChannelService(ctx context.Context, imc *v1alpha1.InMemoryChannel) (*corev1.Service, error) { -// // Get the Service and propagate the status to the Channel in case it does not exist. -// // We don't do anything with the service because it's status contains nothing useful, so just do -// // an existence check. Then below we check the endpoints targeting it. -// // We may change this name later, so we have to ensure we use proper addressable when resolving these. -// svc, err := r.serviceLister.Services(imc.Namespace).Get(fmt.Sprintf("%s-kn-channel", imc.Name)) -// if err != nil { -// if apierrs.IsNotFound(err) { -// svc, err = resources.NewK8sService(imc, resources.ExternalService(r.dispatcherNamespace, r.dispatcherServiceName)) -// if err != nil { -// logging.FromContext(ctx).Error("failed to create the channel service object", zap.Error(err)) -// return nil, err -// } -// svc, err = r.KubeClientSet.CoreV1().Services(imc.Namespace).Create(svc) -// if err != nil { -// logging.FromContext(ctx).Error("failed to create the channel service", zap.Error(err)) -// return nil, err -// } -// return svc, nil -// } else { -// logging.FromContext(ctx).Error("Unable to get the channel service", zap.Error(err)) -// } -// return nil, err -// } -// -// // Check to make sure that our IMC owns this service and if not, complain. 
-// if !metav1.IsControlledBy(svc, imc) { -// return nil, fmt.Errorf("inmemorychannel: %s/%s does not own Service: %q", imc.Namespace, imc.Name, svc.Name) -// } -// return svc, nil -//} +func (r *Reconciler) reconcileChannelService(ctx context.Context, channel *v1alpha1.KafkaChannel) (*corev1.Service, error) { + logger := logging.FromContext(ctx) + // Get the Service and propagate the status to the Channel in case it does not exist. + // We don't do anything with the service because it's status contains nothing useful, so just do + // an existence check. Then below we check the endpoints targeting it. + // We may change this name later, so we have to ensure we use proper addressable when resolving these. + svc, err := r.serviceLister.Services(channel.Namespace).Get(resources.MakeChannelServiceName(channel.Name)) + if err != nil { + if apierrs.IsNotFound(err) { + svc, err = resources.MakeService(channel, resources.ExternalService(r.dispatcherNamespace, r.dispatcherServiceName)) + if err != nil { + logger.Error("Failed to create the channel service object", zap.Error(err)) + return nil, err + } + svc, err = r.KubeClientSet.CoreV1().Services(channel.Namespace).Create(svc) + if err != nil { + logger.Error("Failed to create the channel service", zap.Error(err)) + return nil, err + } + return svc, nil + } else { + logger.Error("Unable to get the channel service", zap.Error(err)) + } + return nil, err + } + // Check to make sure that the KafkaChannel owns this service and if not, complain. 
+ if !metav1.IsControlledBy(svc, channel) { + return nil, fmt.Errorf("kafkachannel: %s/%s does not own Service: %q", channel.Namespace, channel.Name, svc.Name) + } + return svc, nil +} func (r *Reconciler) updateStatus(ctx context.Context, desired *v1alpha1.KafkaChannel) (*v1alpha1.KafkaChannel, error) { kc, err := r.kafkachannelLister.KafkaChannels(desired.Namespace).Get(desired.Name) @@ -328,3 +385,86 @@ func (r *Reconciler) updateStatus(ctx context.Context, desired *v1alpha1.KafkaCh return new, err } + +func (r *Reconciler) createClient(ctx context.Context, kc *v1alpha1.KafkaChannel) (sarama.ClusterAdmin, error) { + // We don't currently initialize r.kafkaClusterAdmin, hence we end up creating the cluster admin client every time. + // This is because of an issue with Shopify/sarama. See https://github.com/Shopify/sarama/issues/1162. + // Once the issue is fixed we should use a shared cluster admin client. Also, r.kafkaClusterAdmin is currently + // used to pass a fake admin client in the tests. 
+ kafkaClusterAdmin := r.kafkaClusterAdmin + if kafkaClusterAdmin == nil { + var err error + args := &resources.ClientArgs{ + ClientID: controllerAgentName, + BootstrapServers: strings.Split(kc.Spec.BootstrapServers, ","), + } + kafkaClusterAdmin, err = resources.MakeClient(args) + if err != nil { + return nil, err + } + } + return kafkaClusterAdmin, nil +} + +func (r *Reconciler) createTopic(ctx context.Context, channel *v1alpha1.KafkaChannel, kafkaClusterAdmin sarama.ClusterAdmin) error { + logger := logging.FromContext(ctx) + topicName := resources.MakeTopicName(channel) + logger.Info("Creating topic on Kafka cluster", zap.String("topic", topicName)) + + err := kafkaClusterAdmin.CreateTopic(topicName, &sarama.TopicDetail{ + ReplicationFactor: channel.Spec.ReplicationFactor, + NumPartitions: channel.Spec.NumPartitions, + }, false) + if err == sarama.ErrTopicAlreadyExists { + return nil + } else if err != nil { + logger.Error("Error creating topic", zap.String("topic", topicName), zap.Error(err)) + } else { + logger.Info("Successfully created topic", zap.String("topic", topicName)) + } + return err +} + +func (r *Reconciler) deleteTopic(ctx context.Context, channel *v1alpha1.KafkaChannel, kafkaClusterAdmin sarama.ClusterAdmin) error { + logger := logging.FromContext(ctx) + + topicName := resources.MakeTopicName(channel) + logger.Info("Deleting topic on Kafka Cluster", zap.String("topic", topicName)) + err := kafkaClusterAdmin.DeleteTopic(topicName) + if err == sarama.ErrUnknownTopicOrPartition { + return nil + } else if err != nil { + logger.Error("Error deleting topic", zap.String("topic", topicName), zap.Error(err)) + } else { + logger.Info("Successfully deleted topic", zap.String("topic", topicName)) + } + return err +} + +func (r *Reconciler) ensureFinalizer(channel *v1alpha1.KafkaChannel) error { + finalizers := sets.NewString(channel.Finalizers...) 
+ if finalizers.Has(finalizerName) { + return nil + } + + mergePatch := map[string]interface{}{ + "metadata": map[string]interface{}{ + "finalizers": append(channel.Finalizers, finalizerName), + "resourceVersion": channel.ResourceVersion, + }, + } + + patch, err := json.Marshal(mergePatch) + if err != nil { + return err + } + + _, err = r.eventingClientSet.MessagingV1alpha1().KafkaChannels(channel.Namespace).Patch(channel.Name, types.MergePatchType, patch) + return err +} + +func removeFinalizer(channel *v1alpha1.KafkaChannel) { + finalizers := sets.NewString(channel.Finalizers...) + finalizers.Delete(finalizerName) + channel.Finalizers = finalizers.List() +} diff --git a/contrib/kafka/pkg/reconciler/controller/resources/client.go b/contrib/kafka/pkg/reconciler/controller/resources/client.go new file mode 100644 index 00000000000..40f397d92e2 --- /dev/null +++ b/contrib/kafka/pkg/reconciler/controller/resources/client.go @@ -0,0 +1,33 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package resources + +import ( + "github.com/Shopify/sarama" +) + +type ClientArgs struct { + ClientID string + BootstrapServers []string +} + +func MakeClient(args *ClientArgs) (sarama.ClusterAdmin, error) { + saramaConf := sarama.NewConfig() + saramaConf.Version = sarama.V1_1_0_0 + saramaConf.ClientID = args.ClientID + return sarama.NewClusterAdmin(args.BootstrapServers, saramaConf) +} diff --git a/contrib/kafka/pkg/reconciler/controller/resources/service.go b/contrib/kafka/pkg/reconciler/controller/resources/service.go new file mode 100644 index 00000000000..8334d60a131 --- /dev/null +++ b/contrib/kafka/pkg/reconciler/controller/resources/service.go @@ -0,0 +1,95 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resources + +import ( + "fmt" + + "github.com/knative/eventing/contrib/kafka/pkg/apis/messaging/v1alpha1" + "github.com/knative/eventing/pkg/utils" + "github.com/knative/pkg/kmeta" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + PortName = "http" + PortNumber = 80 + MessagingRoleLabel = "messaging.knative.dev/role" + MessagingRole = "kafka-channel" +) + +// ServiceOption can be used to optionally modify the K8s service in MakeService. 
+type ServiceOption func(*corev1.Service) error + +func MakeExternalServiceAddress(namespace, service string) string { + return fmt.Sprintf("%s.%s.svc.%s", service, namespace, utils.GetClusterDomainName()) +} + +func MakeChannelServiceName(name string) string { + return fmt.Sprintf("%s-kn-channel", name) +} + +// ExternalService is a functional option for MakeService to create a K8s service of type ExternalName +// pointing to the specified service in a namespace. +func ExternalService(namespace, service string) ServiceOption { + return func(svc *corev1.Service) error { + svc.Spec = corev1.ServiceSpec{ + Type: corev1.ServiceTypeExternalName, + ExternalName: MakeExternalServiceAddress(namespace, service), + } + return nil + } +} + +// MakeService creates a new K8s Service for a Channel resource. It also sets the appropriate +// OwnerReferences on the resource so handleObject can discover the Channel resource that 'owns' it. +// As well as being garbage collected when the Channel is deleted. 
+func MakeService(kc *v1alpha1.KafkaChannel, opts ...ServiceOption) (*corev1.Service, error) { + // Add annotations + svc := &corev1.Service{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Service", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: MakeChannelServiceName(kc.ObjectMeta.Name), + Namespace: kc.Namespace, + Labels: map[string]string{ + MessagingRoleLabel: MessagingRole, + }, + OwnerReferences: []metav1.OwnerReference{ + *kmeta.NewControllerRef(kc), + }, + }, + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{ + { + Name: PortName, + Protocol: corev1.ProtocolTCP, + Port: PortNumber, + }, + }, + }, + } + for _, opt := range opts { + if err := opt(svc); err != nil { + return nil, err + } + } + return svc, nil +} diff --git a/contrib/kafka/pkg/reconciler/controller/resources/service_test.go b/contrib/kafka/pkg/reconciler/controller/resources/service_test.go new file mode 100644 index 00000000000..6087c9d2d6f --- /dev/null +++ b/contrib/kafka/pkg/reconciler/controller/resources/service_test.go @@ -0,0 +1,143 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package resources + +import ( + "errors" + "fmt" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/knative/eventing/contrib/kafka/pkg/apis/messaging/v1alpha1" + "github.com/knative/pkg/kmeta" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + serviceName = "my-test-service" + kcName = "my-test-kc" + testNS = "my-test-ns" + dispatcherNS = "dispatcher-namespace" + dispatcherName = "dispatcher-name" +) + +func TestMakeExternalServiceAddress(t *testing.T) { + if want, got := "my-test-service.my-test-ns.svc.cluster.local", MakeExternalServiceAddress(testNS, serviceName); want != got { + t.Errorf("Want: %q got %q", want, got) + } +} + +func TestMakeChannelServiceAddress(t *testing.T) { + if want, got := "my-test-imc-kn-channel", MakeChannelServiceName(kcName); want != got { + t.Errorf("Want: %q got %q", want, got) + } +} + +func TestMakeService(t *testing.T) { + imc := &v1alpha1.KafkaChannel{ + ObjectMeta: metav1.ObjectMeta{ + Name: kcName, + Namespace: testNS, + }, + } + want := &corev1.Service{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Service", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-kn-channel", kcName), + Namespace: testNS, + Labels: map[string]string{ + MessagingRoleLabel: MessagingRole, + }, + OwnerReferences: []metav1.OwnerReference{ + *kmeta.NewControllerRef(imc), + }, + }, + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{ + { + Name: PortName, + Protocol: corev1.ProtocolTCP, + Port: PortNumber, + }, + }, + }, + } + + got, err := MakeService(imc) + if err != nil { + t.Fatalf("Failed to create new service: %s", err) + } + + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("unexpected condition (-want, +got) = %v", diff) + } +} + +func TestMakeServiceWithExternal(t *testing.T) { + imc := &v1alpha1.KafkaChannel{ + ObjectMeta: metav1.ObjectMeta{ + Name: kcName, + Namespace: testNS, + }, + } + want := &corev1.Service{ + TypeMeta: metav1.TypeMeta{ + 
APIVersion: "v1", + Kind: "Service", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-kn-channel", kcName), + Namespace: testNS, + Labels: map[string]string{ + MessagingRoleLabel: MessagingRole, + }, + OwnerReferences: []metav1.OwnerReference{ + *kmeta.NewControllerRef(imc), + }, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeExternalName, + ExternalName: "dispatcher-name.dispatcher-namespace.svc.cluster.local", + }, + } + + got, err := MakeService(imc, ExternalService(dispatcherNS, dispatcherName)) + if err != nil { + t.Fatalf("Failed to create new service: %s", err) + } + + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("unexpected condition (-want, +got) = %v", diff) + } +} + +func TestMakeServiceWithFailingOption(t *testing.T) { + imc := &v1alpha1.KafkaChannel{ + ObjectMeta: metav1.ObjectMeta{ + Name: kcName, + Namespace: testNS, + }, + } + _, err := MakeService(imc, func(svc *corev1.Service) error { return errors.New("test-induced failure") }) + if err == nil { + t.Fatalf("Expcted error from new service but got none") + } +} diff --git a/contrib/kafka/pkg/reconciler/controller/resources/topic.go b/contrib/kafka/pkg/reconciler/controller/resources/topic.go new file mode 100644 index 00000000000..1871e469289 --- /dev/null +++ b/contrib/kafka/pkg/reconciler/controller/resources/topic.go @@ -0,0 +1,10 @@ +package resources + +import ( + "fmt" + "github.com/knative/eventing/contrib/kafka/pkg/apis/messaging/v1alpha1" +) + +func MakeTopicName(channel *v1alpha1.KafkaChannel) string { + return fmt.Sprintf("%s.%s", channel.Namespace, channel.Name) +} From 9aeaa7f2689c0fed165b2db474096c4f056a382e Mon Sep 17 00:00:00 2001 From: nachocano Date: Tue, 21 May 2019 17:01:38 -0700 Subject: [PATCH 11/64] more updates --- contrib/kafka/cmd/channel_dispatcher/main.go | 1 - contrib/kafka/config/500-dispatcher.yaml | 10 +- .../v1alpha1/kafka_channel_lifecycle.go | 12 ++ .../v1alpha1/kafka_channel_lifecycle_test.go | 35 +++++ 
.../messaging/v1alpha1/kafka_channel_types.go | 8 +- .../pkg/reconciler/controller/kafkachannel.go | 139 ++++++++---------- .../controller/resources/service.go | 16 +- .../controller/resources/service_test.go | 12 +- .../pkg/reconciler/dispatcher/kafkachannel.go | 1 - 9 files changed, 132 insertions(+), 102 deletions(-) delete mode 100644 contrib/kafka/cmd/channel_dispatcher/main.go delete mode 100644 contrib/kafka/pkg/reconciler/dispatcher/kafkachannel.go diff --git a/contrib/kafka/cmd/channel_dispatcher/main.go b/contrib/kafka/cmd/channel_dispatcher/main.go deleted file mode 100644 index 06ab7d0f9a3..00000000000 --- a/contrib/kafka/cmd/channel_dispatcher/main.go +++ /dev/null @@ -1 +0,0 @@ -package main diff --git a/contrib/kafka/config/500-dispatcher.yaml b/contrib/kafka/config/500-dispatcher.yaml index baa074ff16d..4787808f34d 100644 --- a/contrib/kafka/config/500-dispatcher.yaml +++ b/contrib/kafka/config/500-dispatcher.yaml @@ -30,16 +30,16 @@ spec: serviceAccountName: kafka-dispatcher containers: - name: dispatcher - image: github.com/knative/eventing/contrib/kafka/cmd/channel_dispatcher + image: github.com/knative/eventing/contrib/kafka/cmd/dispatcher env: - name: SYSTEM_NAMESPACE valueFrom: fieldRef: fieldPath: metadata.namespace volumeMounts: - - name: config-channel - mountPath: /etc/config-channel + - name: kafka-channel-controller-config + mountPath: /etc/config-provisioner volumes: - - name: config-channel + - name: kafka-channel-controller-config configMap: - name: kafka-channel-config + name: kafka-channel-controller-config diff --git a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_lifecycle.go b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_lifecycle.go index 3acb6c4dd15..686544c4a3e 100644 --- a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_lifecycle.go +++ b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_lifecycle.go @@ -23,6 +23,7 @@ import ( ) var kc = duckv1alpha1.NewLivingConditionSet( + 
KafkaChannelConditionTopicReady, KafkaChannelConditionDispatcherReady, KafkaChannelConditionServiceReady, KafkaChannelConditionEndpointsReady, @@ -54,6 +55,9 @@ const ( // KafkaChannelConditionServiceReady has status True when a k8s Service representing the channel is ready. // Because this uses ExternalName, there are no endpoints to check. KafkaChannelConditionChannelServiceReady duckv1alpha1.ConditionType = "ChannelServiceReady" + + // KafkaChannelConditionTopicReady has status True when the Kafka topic to use by the channel exists. + KafkaChannelConditionTopicReady duckv1alpha1.ConditionType = "TopicReady" ) // GetCondition returns the condition currently associated with the given type, or nil. @@ -121,3 +125,11 @@ func (cs *KafkaChannelStatus) MarkEndpointsFailed(reason, messageFormat string, func (cs *KafkaChannelStatus) MarkEndpointsTrue() { kc.Manage(cs).MarkTrue(KafkaChannelConditionEndpointsReady) } + +func (cs *KafkaChannelStatus) MarkTopicTrue() { + kc.Manage(cs).MarkTrue(KafkaChannelConditionTopicReady) +} + +func (cs *KafkaChannelStatus) MarkTopicFailed(reason, messageFormat string, messageA ...interface{}) { + kc.Manage(cs).MarkFalse(KafkaChannelConditionTopicReady, reason, messageFormat, messageA...) 
+} diff --git a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_lifecycle_test.go b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_lifecycle_test.go index 1b228561ad3..d6a86f6582e 100644 --- a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_lifecycle_test.go +++ b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_lifecycle_test.go @@ -51,6 +51,11 @@ var condDispatcherEndpointsReady = duckv1alpha1.Condition{ Status: corev1.ConditionTrue, } +var condTopicReady = duckv1alpha1.Condition{ + Type: KafkaChannelConditionTopicReady, + Status: corev1.ConditionTrue, +} + var condDispatcherAddressable = duckv1alpha1.Condition{ Type: KafkaChannelConditionAddressable, Status: corev1.ConditionTrue, @@ -143,6 +148,9 @@ func TestChannelInitializeConditions(t *testing.T) { }, { Type: KafkaChannelConditionServiceReady, Status: corev1.ConditionUnknown, + }, { + Type: KafkaChannelConditionTopicReady, + Status: corev1.ConditionUnknown, }}, }, }, @@ -176,6 +184,9 @@ func TestChannelInitializeConditions(t *testing.T) { }, { Type: KafkaChannelConditionServiceReady, Status: corev1.ConditionUnknown, + }, { + Type: KafkaChannelConditionTopicReady, + Status: corev1.ConditionUnknown, }}, }, }, @@ -209,6 +220,9 @@ func TestChannelInitializeConditions(t *testing.T) { }, { Type: KafkaChannelConditionServiceReady, Status: corev1.ConditionUnknown, + }, { + Type: KafkaChannelConditionTopicReady, + Status: corev1.ConditionUnknown, }}, }, }, @@ -231,6 +245,7 @@ func TestChannelIsReady(t *testing.T) { markChannelServiceReady bool setAddress bool markEndpointsReady bool + markTopicReady bool wantReady bool dispatcherStatus *appsv1.DeploymentStatus }{{ @@ -240,6 +255,7 @@ func TestChannelIsReady(t *testing.T) { markEndpointsReady: true, dispatcherStatus: deploymentStatusReady, setAddress: true, + markTopicReady: true, wantReady: true, }, { name: "service not ready", @@ -248,6 +264,7 @@ func TestChannelIsReady(t *testing.T) { markEndpointsReady: true, dispatcherStatus: 
deploymentStatusReady, setAddress: true, + markTopicReady: true, wantReady: false, }, { name: "endpoints not ready", @@ -256,6 +273,7 @@ func TestChannelIsReady(t *testing.T) { markEndpointsReady: false, dispatcherStatus: deploymentStatusReady, setAddress: true, + markTopicReady: true, wantReady: false, }, { name: "deployment not ready", @@ -264,6 +282,7 @@ func TestChannelIsReady(t *testing.T) { markChannelServiceReady: false, dispatcherStatus: deploymentStatusNotReady, setAddress: true, + markTopicReady: true, wantReady: false, }, { name: "address not set", @@ -272,6 +291,7 @@ func TestChannelIsReady(t *testing.T) { markEndpointsReady: true, dispatcherStatus: deploymentStatusReady, setAddress: false, + markTopicReady: true, wantReady: false, }, { name: "channel service not ready", @@ -280,6 +300,16 @@ func TestChannelIsReady(t *testing.T) { markEndpointsReady: true, dispatcherStatus: deploymentStatusReady, setAddress: true, + markTopicReady: true, + wantReady: false, + }, { + name: "topic not ready", + markServiceReady: true, + markChannelServiceReady: true, + markEndpointsReady: true, + dispatcherStatus: deploymentStatusReady, + setAddress: true, + markTopicReady: false, wantReady: false, }} for _, test := range tests { @@ -309,6 +339,11 @@ func TestChannelIsReady(t *testing.T) { } else { cs.MarkDispatcherFailed("NotReadyDispatcher", "testing") } + if test.markTopicReady { + cs.MarkTopicTrue() + } else { + cs.MarkTopicFailed("NotReadyTopic", "testing") + } got := cs.IsReady() if test.wantReady != got { t.Errorf("unexpected readiness: want %v, got %v", test.wantReady, got) diff --git a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_types.go b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_types.go index 1cfe1cbdf51..2fa71aa17c3 100644 --- a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_types.go +++ b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_types.go @@ -38,7 +38,7 @@ type KafkaChannel struct { // Spec defines the desired 
state of the Channel. Spec KafkaChannelSpec `json:"spec,omitempty"` - // Status represents the current state of the Channel. This data may be out of + // Status represents the current state of the KafkaChannel. This data may be out of // date. // +optional Status KafkaChannelStatus `json:"status,omitempty"` @@ -64,14 +64,14 @@ type KafkaChannelSpec struct { // NumPartitions is the number of partitions of a Kafka topic. By default, it is set to 1. NumPartitions int32 `json:"numPartitions"` - //ReplicationFactor is the replication factor of a Kafka topic. By default, it is set to 1. + // ReplicationFactor is the replication factor of a Kafka topic. By default, it is set to 1. ReplicationFactor int16 `json:"replicationFactor"` - // Channel conforms to Duck type Subscribable. + // KafkaChannel conforms to Duck type Subscribable. Subscribable *eventingduck.Subscribable `json:"subscribable,omitempty"` } -// ChannelStatus represents the current state of a Channel. +// KafkaChannelStatus represents the current state of a KafkaChannel. type KafkaChannelStatus struct { // inherits duck/v1alpha1 Status, which currently provides: // * ObservedGeneration - the 'Generation' of the Service that was last processed by the controller. diff --git a/contrib/kafka/pkg/reconciler/controller/kafkachannel.go b/contrib/kafka/pkg/reconciler/controller/kafkachannel.go index d3e6c94604a..70a67921c54 100644 --- a/contrib/kafka/pkg/reconciler/controller/kafkachannel.go +++ b/contrib/kafka/pkg/reconciler/controller/kafkachannel.go @@ -231,13 +231,67 @@ func (r *Reconciler) reconcile(ctx context.Context, kc *v1alpha1.KafkaChannel) e return err } + // We reconcile the status of the Channel by looking at: + // 1. Kafka topic used by the channel. + // 2. Dispatcher Deployment for it's readiness. + // 3. Dispatcher k8s Service for it's existence. + // 4. Dispatcher endpoints to ensure that there's something backing the Service. + // 5. 
K8s service representing the channel that will use ExternalName to point to the Dispatcher k8s service. + if err := r.createTopic(ctx, kc, kafkaClusterAdmin); err != nil { - // kc.Status.MarkNotProvisioned("NotProvisioned", "error while provisioning: %s", err) + kc.Status.MarkTopicFailed("TopicCreateFailed", "error while creating topic: %s", err) + return err + } + + // Get the Dispatcher Deployment and propagate the status to the Channel + d, err := r.deploymentLister.Deployments(r.dispatcherNamespace).Get(r.dispatcherDeploymentName) + if err != nil { + if apierrs.IsNotFound(err) { + kc.Status.MarkDispatcherFailed("DispatcherDeploymentDoesNotExist", "Dispatcher Deployment does not exist") + } else { + logger.Error("Unable to get the dispatcher Deployment", zap.Error(err)) + kc.Status.MarkDispatcherFailed("DispatcherDeploymentGetFailed", "Failed to get dispatcher Deployment") + } + return err + } + kc.Status.PropagateDispatcherStatus(&d.Status) + + // Get the Dispatcher Service and propagate the status to the Channel in case it does not exist. + // We don't do anything with the service because it's status contains nothing useful, so just do + // an existence check. Then below we check the endpoints targeting it. + _, err = r.serviceLister.Services(r.dispatcherNamespace).Get(r.dispatcherServiceName) + if err != nil { + if apierrs.IsNotFound(err) { + kc.Status.MarkServiceFailed("DispatcherServiceDoesNotExist", "Dispatcher Service does not exist") + } else { + logger.Error("Unable to get the dispatcher service", zap.Error(err)) + kc.Status.MarkServiceFailed("DispatcherServiceGetFailed", "Failed to get dispatcher service") + } + return err + } + kc.Status.MarkServiceTrue() + + // Get the Dispatcher Service Endpoints and propagate the status to the Channel + // endpoints has the same name as the service, so not a bug. 
+ e, err := r.endpointsLister.Endpoints(r.dispatcherNamespace).Get(r.dispatcherServiceName) + if err != nil { + if apierrs.IsNotFound(err) { + kc.Status.MarkEndpointsFailed("DispatcherEndpointsDoesNotExist", "Dispatcher Endpoints does not exist") + } else { + logger.Error("Unable to get the dispatcher endpoints", zap.Error(err)) + kc.Status.MarkEndpointsFailed("DispatcherEndpointsGetFailed", "Failed to get dispatcher endpoints") + } return err } - // Reconcile the k8s service representing the actual Channel. It points to the Dispatcher service via - // ExternalName + if len(e.Subsets) == 0 { + logger.Error("No endpoints found for Dispatcher service", zap.Error(err)) + kc.Status.MarkEndpointsFailed("DispatcherEndpointsNotReady", "There are no endpoints ready for Dispatcher service") + return fmt.Errorf("there are no endpoints ready for Dispatcher service %s", r.dispatcherServiceName) + } + kc.Status.MarkEndpointsTrue() + + // Reconcile the k8s service representing the actual Channel. It points to the Dispatcher service via ExternalName svc, err := r.reconcileChannelService(ctx, kc) if err != nil { kc.Status.MarkChannelServiceFailed("ChannelServiceFailed", fmt.Sprintf("Channel Service failed: %s", err)) @@ -256,76 +310,6 @@ func (r *Reconciler) reconcile(ctx context.Context, kc *v1alpha1.KafkaChannel) e // Ok, so now the Dispatcher Deployment & Service have been created, we're golden since the // dispatcher watches the Channel and where it needs to dispatch events to. return nil - - // We reconcile the status of the Channel by looking at: - // 1. Dispatcher Deployment for it's readiness. - // 2. Dispatcher k8s Service for it's existence. - // 3. Dispatcher endpoints to ensure that there's something backing the Service. - // 4. 
k8s service representing the channel that will use ExternalName to point to the Dispatcher k8s service - - // Get the Dispatcher Deployment and propagate the status to the Channel - //d, err := r.deploymentLister.Deployments(r.dispatcherNamespace).Get(r.dispatcherDeploymentName) - //if err != nil { - // if apierrs.IsNotFound(err) { - // imc.Status.MarkDispatcherFailed("DispatcherDeploymentDoesNotExist", "Dispatcher Deployment does not exist") - // } else { - // logging.FromContext(ctx).Error("Unable to get the dispatcher Deployment", zap.Error(err)) - // imc.Status.MarkDispatcherFailed("DispatcherDeploymentGetFailed", "Failed to get dispatcher Deployment") - // } - // return err - //} - //imc.Status.PropagateDispatcherStatus(&d.Status) - // - //// Get the Dispatcher Service and propagate the status to the Channel in case it does not exist. - //// We don't do anything with the service because it's status contains nothing useful, so just do - //// an existence check. Then below we check the endpoints targeting it. - //_, err = r.serviceLister.Services(r.dispatcherNamespace).Get(r.dispatcherServiceName) - //if err != nil { - // if apierrs.IsNotFound(err) { - // imc.Status.MarkServiceFailed("DispatcherServiceDoesNotExist", "Dispatcher Service does not exist") - // } else { - // logging.FromContext(ctx).Error("Unable to get the dispatcher service", zap.Error(err)) - // imc.Status.MarkServiceFailed("DispatcherServiceGetFailed", "Failed to get dispatcher service") - // } - // return err - //} - // - //imc.Status.MarkServiceTrue() - // - //// Get the Dispatcher Service Endpoints and propagate the status to the Channel - //// endpoints has the same name as the service, so not a bug. 
- //e, err := r.endpointsLister.Endpoints(r.dispatcherNamespace).Get(r.dispatcherServiceName) - //if err != nil { - // if apierrs.IsNotFound(err) { - // imc.Status.MarkEndpointsFailed("DispatcherEndpointsDoesNotExist", "Dispatcher Endpoints does not exist") - // } else { - // logging.FromContext(ctx).Error("Unable to get the dispatcher endpoints", zap.Error(err)) - // imc.Status.MarkEndpointsFailed("DispatcherEndpointsGetFailed", "Failed to get dispatcher endpoints") - // } - // return err - //} - // - //if len(e.Subsets) == 0 { - // logging.FromContext(ctx).Error("No endpoints found for Dispatcher service", zap.Error(err)) - // imc.Status.MarkEndpointsFailed("DispatcherEndpointsNotReady", "There are no endpoints ready for Dispatcher service") - // return errors.New("there are no endpoints ready for Dispatcher service") - //} - // - //imc.Status.MarkEndpointsTrue() - // - //// Reconcile the k8s service representing the actual Channel. It points to the Dispatcher service via - //// ExternalName - //svc, err := r.reconcileChannelService(ctx, imc) - //if err != nil { - // imc.Status.MarkChannelServiceFailed("ChannelServiceFailed", fmt.Sprintf("Channel Service failed: %s", err)) - // return err - //} - //imc.Status.MarkChannelServiceTrue() - //imc.Status.SetAddress(fmt.Sprintf("%s.%s.svc.%s", svc.Name, svc.Namespace, utils.GetClusterDomainName())) - - // Ok, so now the Dispatcher Deployment & Service have been created, we're golden since the - // dispatcher watches the Channel and where it needs to dispatch events to. 
- return nil } func (r *Reconciler) reconcileChannelService(ctx context.Context, channel *v1alpha1.KafkaChannel) (*corev1.Service, error) { @@ -337,7 +321,7 @@ func (r *Reconciler) reconcileChannelService(ctx context.Context, channel *v1alp svc, err := r.serviceLister.Services(channel.Namespace).Get(resources.MakeChannelServiceName(channel.Name)) if err != nil { if apierrs.IsNotFound(err) { - svc, err = resources.MakeService(channel, resources.ExternalService(r.dispatcherNamespace, r.dispatcherServiceName)) + svc, err = resources.MakeK8sService(channel, resources.ExternalService(r.dispatcherNamespace, r.dispatcherServiceName)) if err != nil { logger.Error("Failed to create the channel service object", zap.Error(err)) return nil, err @@ -380,9 +364,10 @@ func (r *Reconciler) updateStatus(ctx context.Context, desired *v1alpha1.KafkaCh if err == nil && becomesReady { duration := time.Since(new.ObjectMeta.CreationTimestamp.Time) r.Logger.Infof("KafkaChannel %q became ready after %v", kc.Name, duration) - // TODO: stats + if err := r.StatsReporter.ReportReady("Channel", kc.Namespace, kc.Name, duration); err != nil { + r.Logger.Infof("Failed to record ready for KafkaChannel %q: %v", kc.Name, err) + } } - return new, err } @@ -408,9 +393,9 @@ func (r *Reconciler) createClient(ctx context.Context, kc *v1alpha1.KafkaChannel func (r *Reconciler) createTopic(ctx context.Context, channel *v1alpha1.KafkaChannel, kafkaClusterAdmin sarama.ClusterAdmin) error { logger := logging.FromContext(ctx) + topicName := resources.MakeTopicName(channel) logger.Info("Creating topic on Kafka cluster", zap.String("topic", topicName)) - err := kafkaClusterAdmin.CreateTopic(topicName, &sarama.TopicDetail{ ReplicationFactor: channel.Spec.ReplicationFactor, NumPartitions: channel.Spec.NumPartitions, diff --git a/contrib/kafka/pkg/reconciler/controller/resources/service.go b/contrib/kafka/pkg/reconciler/controller/resources/service.go index 8334d60a131..b9d6ba72adb 100644 --- 
a/contrib/kafka/pkg/reconciler/controller/resources/service.go +++ b/contrib/kafka/pkg/reconciler/controller/resources/service.go @@ -27,13 +27,13 @@ import ( ) const ( - PortName = "http" - PortNumber = 80 + portName = "http" + portNumber = 80 MessagingRoleLabel = "messaging.knative.dev/role" MessagingRole = "kafka-channel" ) -// ServiceOption can be used to optionally modify the K8s service in MakeService. +// ServiceOption can be used to optionally modify the K8s service in MakeK8sService. type ServiceOption func(*corev1.Service) error func MakeExternalServiceAddress(namespace, service string) string { @@ -44,7 +44,7 @@ func MakeChannelServiceName(name string) string { return fmt.Sprintf("%s-kn-channel", name) } -// ExternalService is a functional option for MakeService to create a K8s service of type ExternalName +// ExternalService is a functional option for MakeK8sService to create a K8s service of type ExternalName // pointing to the specified service in a namespace. func ExternalService(namespace, service string) ServiceOption { return func(svc *corev1.Service) error { @@ -56,10 +56,10 @@ func ExternalService(namespace, service string) ServiceOption { } } -// MakeService creates a new K8s Service for a Channel resource. It also sets the appropriate +// MakeK8sService creates a new K8s Service for a Channel resource. It also sets the appropriate // OwnerReferences on the resource so handleObject can discover the Channel resource that 'owns' it. // As well as being garbage collected when the Channel is deleted. 
-func MakeService(kc *v1alpha1.KafkaChannel, opts ...ServiceOption) (*corev1.Service, error) { +func MakeK8sService(kc *v1alpha1.KafkaChannel, opts ...ServiceOption) (*corev1.Service, error) { // Add annotations svc := &corev1.Service{ TypeMeta: metav1.TypeMeta{ @@ -79,9 +79,9 @@ func MakeService(kc *v1alpha1.KafkaChannel, opts ...ServiceOption) (*corev1.Serv Spec: corev1.ServiceSpec{ Ports: []corev1.ServicePort{ { - Name: PortName, + Name: portName, Protocol: corev1.ProtocolTCP, - Port: PortNumber, + Port: portNumber, }, }, }, diff --git a/contrib/kafka/pkg/reconciler/controller/resources/service_test.go b/contrib/kafka/pkg/reconciler/controller/resources/service_test.go index 6087c9d2d6f..66dac7ff372 100644 --- a/contrib/kafka/pkg/reconciler/controller/resources/service_test.go +++ b/contrib/kafka/pkg/reconciler/controller/resources/service_test.go @@ -43,7 +43,7 @@ func TestMakeExternalServiceAddress(t *testing.T) { } func TestMakeChannelServiceAddress(t *testing.T) { - if want, got := "my-test-imc-kn-channel", MakeChannelServiceName(kcName); want != got { + if want, got := "my-test-kc-kn-channel", MakeChannelServiceName(kcName); want != got { t.Errorf("Want: %q got %q", want, got) } } @@ -73,15 +73,15 @@ func TestMakeService(t *testing.T) { Spec: corev1.ServiceSpec{ Ports: []corev1.ServicePort{ { - Name: PortName, + Name: portName, Protocol: corev1.ProtocolTCP, - Port: PortNumber, + Port: portNumber, }, }, }, } - got, err := MakeService(imc) + got, err := MakeK8sService(imc) if err != nil { t.Fatalf("Failed to create new service: %s", err) } @@ -119,7 +119,7 @@ func TestMakeServiceWithExternal(t *testing.T) { }, } - got, err := MakeService(imc, ExternalService(dispatcherNS, dispatcherName)) + got, err := MakeK8sService(imc, ExternalService(dispatcherNS, dispatcherName)) if err != nil { t.Fatalf("Failed to create new service: %s", err) } @@ -136,7 +136,7 @@ func TestMakeServiceWithFailingOption(t *testing.T) { Namespace: testNS, }, } - _, err := MakeService(imc, 
func(svc *corev1.Service) error { return errors.New("test-induced failure") }) + _, err := MakeK8sService(imc, func(svc *corev1.Service) error { return errors.New("test-induced failure") }) if err == nil { t.Fatalf("Expcted error from new service but got none") } diff --git a/contrib/kafka/pkg/reconciler/dispatcher/kafkachannel.go b/contrib/kafka/pkg/reconciler/dispatcher/kafkachannel.go deleted file mode 100644 index 909218d5017..00000000000 --- a/contrib/kafka/pkg/reconciler/dispatcher/kafkachannel.go +++ /dev/null @@ -1 +0,0 @@ -package dispatcher From 7ee08638931997d54ccc9b237a035fe88c833ffe Mon Sep 17 00:00:00 2001 From: nachocano Date: Tue, 21 May 2019 17:09:38 -0700 Subject: [PATCH 12/64] broken tests... --- .../controller/kafkachannel_test.go | 428 ++++++++++++++++++ 1 file changed, 428 insertions(+) create mode 100644 contrib/kafka/pkg/reconciler/controller/kafkachannel_test.go diff --git a/contrib/kafka/pkg/reconciler/controller/kafkachannel_test.go b/contrib/kafka/pkg/reconciler/controller/kafkachannel_test.go new file mode 100644 index 00000000000..060ab9bdc27 --- /dev/null +++ b/contrib/kafka/pkg/reconciler/controller/kafkachannel_test.go @@ -0,0 +1,428 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controller + +import ( + "fmt" + "testing" + + "github.com/knative/eventing/pkg/apis/messaging/v1alpha1" + fakeclientset "github.com/knative/eventing/pkg/client/clientset/versioned/fake" + informers "github.com/knative/eventing/pkg/client/informers/externalversions" + "github.com/knative/eventing/pkg/reconciler" + reconciletesting "github.com/knative/eventing/pkg/reconciler/testing" + "github.com/knative/eventing/pkg/utils" + duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1" + "github.com/knative/pkg/controller" + "github.com/knative/pkg/kmeta" + logtesting "github.com/knative/pkg/logging/testing" + . "github.com/knative/pkg/reconciler/testing" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + kubeinformers "k8s.io/client-go/informers" + fakekubeclientset "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/kubernetes/scheme" + clientgotesting "k8s.io/client-go/testing" +) + +const ( + systemNS = "knative-eventing" + testNS = "test-namespace" + imcName = "test-imc" + dispatcherDeploymentName = "test-deployment" + dispatcherServiceName = "test-service" + channelServiceAddress = "test-imc-kn-channel.test-namespace.svc.cluster.local" + + subscriberAPIVersion = "v1" + subscriberKind = "Service" + subscriberName = "subscriberName" + subscriberURI = "http://example.com/subscriber" +) + +var ( + trueVal = true + // deletionTime is used when objects are marked as deleted. Rfc3339Copy() + // truncates to seconds to match the loss of precision during serialization. + deletionTime = metav1.Now().Rfc3339Copy() +) + +func init() { + // Add types to scheme + _ = v1alpha1.AddToScheme(scheme.Scheme) + _ = duckv1alpha1.AddToScheme(scheme.Scheme) +} + +func TestNewController(t *testing.T) { + kubeClient := fakekubeclientset.NewSimpleClientset() + eventingClient := fakeclientset.NewSimpleClientset() + + // Create informer factories with fake clients. 
The second parameter sets the + // resync period to zero, disabling it. + kubeInformerFactory := kubeinformers.NewSharedInformerFactory(kubeClient, 0) + eventingInformerFactory := informers.NewSharedInformerFactory(eventingClient, 0) + + // Eventing + imcInformer := eventingInformerFactory.Messaging().V1alpha1().InMemoryChannels() + + // Kube + serviceInformer := kubeInformerFactory.Core().V1().Services() + endpointsInformer := kubeInformerFactory.Core().V1().Endpoints() + deploymentInformer := kubeInformerFactory.Apps().V1().Deployments() + + c := NewController( + reconciler.Options{ + KubeClientSet: kubeClient, + EventingClientSet: eventingClient, + Logger: logtesting.TestLogger(t), + }, + systemNS, + dispatcherDeploymentName, + dispatcherServiceName, + imcInformer, + deploymentInformer, + serviceInformer, + endpointsInformer) + + if c == nil { + t.Fatalf("Failed to create with NewController") + } +} + +func TestAllCases(t *testing.T) { + imcKey := testNS + "/" + imcName + table := TableTest{ + { + Name: "bad workqueue key", + // Make sure Reconcile handles bad keys. + Key: "too/many/parts", + }, { + Name: "key not found", + // Make sure Reconcile handles good keys that don't exist. 
+ Key: "foo/not-found", + }, { // TODO: there is a bug in the controller, it will query for "" + // Name: "trigger key not found ", + // Objects: []runtime.Object{ + // reconciletesting.NewTrigger(triggerName, testNS), + // }, + // Key: "foo/incomplete", + // WantErr: true, + // WantEvents: []string{ + // Eventf(corev1.EventTypeWarning, "ChannelReferenceFetchFailed", "Failed to validate spec.channel exists: s \"\" not found"), + // }, + }, { + Name: "deleting", + Key: imcKey, + Objects: []runtime.Object{ + reconciletesting.NewInMemoryChannel(imcName, testNS, + reconciletesting.WithInitInMemoryChannelConditions, + reconciletesting.WithInMemoryChannelDeleted)}, + WantErr: false, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Reconciled", "InMemoryChannel reconciled"), + }, + }, { + Name: "deployment does not exist", + Key: imcKey, + Objects: []runtime.Object{ + reconciletesting.NewInMemoryChannel(imcName, testNS), + }, + WantErr: true, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: reconciletesting.NewInMemoryChannel(imcName, testNS, + reconciletesting.WithInitInMemoryChannelConditions, + reconciletesting.WithInMemoryChannelDeploymentNotReady("DispatcherDeploymentDoesNotExist", "Dispatcher Deployment does not exist")), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "ReconcileFailed", "InMemoryChannel reconciliation failed: deployment.apps \"test-deployment\" not found"), + }, + }, { + Name: "Service does not exist", + Key: imcKey, + Objects: []runtime.Object{ + makeReadyDeployment(), + reconciletesting.NewInMemoryChannel(imcName, testNS), + }, + WantErr: true, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: reconciletesting.NewInMemoryChannel(imcName, testNS, + reconciletesting.WithInitInMemoryChannelConditions, + reconciletesting.WithInMemoryChannelDeploymentReady(), + reconciletesting.WithInMemoryChannelServicetNotReady("DispatcherServiceDoesNotExist", "Dispatcher Service does not exist")), + }}, 
+ WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "ReconcileFailed", "InMemoryChannel reconciliation failed: service \"test-service\" not found"), + }, + }, { + Name: "Endpoints does not exist", + Key: imcKey, + Objects: []runtime.Object{ + makeReadyDeployment(), + makeService(), + reconciletesting.NewInMemoryChannel(imcName, testNS), + }, + WantErr: true, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: reconciletesting.NewInMemoryChannel(imcName, testNS, + reconciletesting.WithInitInMemoryChannelConditions, + reconciletesting.WithInMemoryChannelDeploymentReady(), + reconciletesting.WithInMemoryChannelServiceReady(), + reconciletesting.WithInMemoryChannelEndpointsNotReady("DispatcherEndpointsDoesNotExist", "Dispatcher Endpoints does not exist"), + ), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "ReconcileFailed", "InMemoryChannel reconciliation failed: endpoints \"test-service\" not found"), + }, + }, { + Name: "Endpoints not ready", + Key: imcKey, + Objects: []runtime.Object{ + makeReadyDeployment(), + makeService(), + makeEmptyEndpoints(), + reconciletesting.NewInMemoryChannel(imcName, testNS), + }, + WantErr: true, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: reconciletesting.NewInMemoryChannel(imcName, testNS, + reconciletesting.WithInitInMemoryChannelConditions, + reconciletesting.WithInMemoryChannelDeploymentReady(), + reconciletesting.WithInMemoryChannelServiceReady(), + reconciletesting.WithInMemoryChannelEndpointsNotReady("DispatcherEndpointsNotReady", "There are no endpoints ready for Dispatcher service"), + ), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "ReconcileFailed", "InMemoryChannel reconciliation failed: there are no endpoints ready for Dispatcher service"), + }, + }, { + Name: "Works, creates new channel", + Key: imcKey, + Objects: []runtime.Object{ + makeReadyDeployment(), + makeService(), + makeReadyEndpoints(), + 
reconciletesting.NewInMemoryChannel(imcName, testNS), + }, + WantErr: false, + WantCreates: []metav1.Object{ + makeChannelService(reconciletesting.NewInMemoryChannel(imcName, testNS)), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: reconciletesting.NewInMemoryChannel(imcName, testNS, + reconciletesting.WithInitInMemoryChannelConditions, + reconciletesting.WithInMemoryChannelDeploymentReady(), + reconciletesting.WithInMemoryChannelServiceReady(), + reconciletesting.WithInMemoryChannelEndpointsReady(), + reconciletesting.WithInMemoryChannelChannelServiceReady(), + reconciletesting.WithInMemoryChannelAddress(channelServiceAddress), + ), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Reconciled", "InMemoryChannel reconciled"), + }, + }, { + Name: "Works, channel exists", + Key: imcKey, + Objects: []runtime.Object{ + makeReadyDeployment(), + makeService(), + makeReadyEndpoints(), + reconciletesting.NewInMemoryChannel(imcName, testNS), + makeChannelService(reconciletesting.NewInMemoryChannel(imcName, testNS)), + }, + WantErr: false, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: reconciletesting.NewInMemoryChannel(imcName, testNS, + reconciletesting.WithInitInMemoryChannelConditions, + reconciletesting.WithInMemoryChannelDeploymentReady(), + reconciletesting.WithInMemoryChannelServiceReady(), + reconciletesting.WithInMemoryChannelEndpointsReady(), + reconciletesting.WithInMemoryChannelChannelServiceReady(), + reconciletesting.WithInMemoryChannelAddress(channelServiceAddress), + ), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Reconciled", "InMemoryChannel reconciled"), + }, + }, { + Name: "channel exists, not owned by us", + Key: imcKey, + Objects: []runtime.Object{ + makeReadyDeployment(), + makeService(), + makeReadyEndpoints(), + reconciletesting.NewInMemoryChannel(imcName, testNS), + makeChannelServiceNotOwnedByUs(reconciletesting.NewInMemoryChannel(imcName, testNS)), + }, + WantErr: 
true, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: reconciletesting.NewInMemoryChannel(imcName, testNS, + reconciletesting.WithInitInMemoryChannelConditions, + reconciletesting.WithInMemoryChannelDeploymentReady(), + reconciletesting.WithInMemoryChannelServiceReady(), + reconciletesting.WithInMemoryChannelEndpointsReady(), + reconciletesting.WithInMemoryChannelChannelServicetNotReady("ChannelServiceFailed", "Channel Service failed: inmemorychannel: test-namespace/test-imc does not own Service: \"test-imc-kn-channel\""), + ), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "ReconcileFailed", "InMemoryChannel reconciliation failed: inmemorychannel: test-namespace/test-imc does not own Service: \"test-imc-kn-channel\""), + }, + }, { + Name: "channel does not exist, fails to create", + Key: imcKey, + Objects: []runtime.Object{ + makeReadyDeployment(), + makeService(), + makeReadyEndpoints(), + reconciletesting.NewInMemoryChannel(imcName, testNS), + }, + WantErr: true, + WithReactors: []clientgotesting.ReactionFunc{ + InduceFailure("create", "Services"), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: reconciletesting.NewInMemoryChannel(imcName, testNS, + reconciletesting.WithInitInMemoryChannelConditions, + reconciletesting.WithInMemoryChannelDeploymentReady(), + reconciletesting.WithInMemoryChannelServiceReady(), + reconciletesting.WithInMemoryChannelEndpointsReady(), + reconciletesting.WithInMemoryChannelChannelServicetNotReady("ChannelServiceFailed", "Channel Service failed: inducing failure for create services"), + ), + }}, + WantCreates: []metav1.Object{ + makeChannelService(reconciletesting.NewInMemoryChannel(imcName, testNS)), + }, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "ReconcileFailed", "InMemoryChannel reconciliation failed: inducing failure for create services"), + }, + }, {}, + } + defer logtesting.ClearAll() + + table.Test(t, reconciletesting.MakeFactory(func(listers 
*reconciletesting.Listers, opt reconciler.Options) controller.Reconciler { + return &Reconciler{ + Base: reconciler.NewBase(opt, controllerAgentName), + dispatcherNamespace: testNS, + dispatcherDeploymentName: dispatcherDeploymentName, + dispatcherServiceName: dispatcherServiceName, + inmemorychannelLister: listers.GetInMemoryChannelLister(), + // TODO: FIx + inmemorychannelInformer: nil, + deploymentLister: listers.GetDeploymentLister(), + serviceLister: listers.GetServiceLister(), + endpointsLister: listers.GetEndpointsLister(), + } + }, + false, + )) +} + +func makeDeployment() *appsv1.Deployment { + return &appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "apps/v1", + Kind: "Deployment", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: testNS, + Name: dispatcherDeploymentName, + }, + Status: appsv1.DeploymentStatus{}, + } +} + +func makeReadyDeployment() *appsv1.Deployment { + d := makeDeployment() + d.Status.Conditions = []appsv1.DeploymentCondition{{Type: appsv1.DeploymentAvailable, Status: corev1.ConditionTrue}} + return d +} + +func makeService() *corev1.Service { + return &corev1.Service{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Service", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: testNS, + Name: dispatcherServiceName, + }, + } +} + +func makeChannelService(imc *v1alpha1.InMemoryChannel) *corev1.Service { + return &corev1.Service{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Service", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: testNS, + Name: fmt.Sprintf("%s-kn-channel", imcName), + Labels: map[string]string{ + "eventing.knative.dev/role": "in-memory-channel", + }, + OwnerReferences: []metav1.OwnerReference{ + *kmeta.NewControllerRef(imc), + }, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeExternalName, + ExternalName: fmt.Sprintf("%s.%s.svc.%s", dispatcherServiceName, testNS, utils.GetClusterDomainName()), + }, + } +} + +func makeChannelServiceNotOwnedByUs(imc 
*v1alpha1.InMemoryChannel) *corev1.Service { + return &corev1.Service{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Service", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: testNS, + Name: fmt.Sprintf("%s-kn-channel", imcName), + Labels: map[string]string{ + "eventing.knative.dev/role": "in-memory-channel", + }, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeExternalName, + ExternalName: fmt.Sprintf("%s.%s.svc.%s", dispatcherServiceName, testNS, utils.GetClusterDomainName()), + }, + } +} + +func makeEmptyEndpoints() *corev1.Endpoints { + return &corev1.Endpoints{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Endpoints", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: testNS, + Name: dispatcherServiceName, + }, + } +} + +func makeReadyEndpoints() *corev1.Endpoints { + e := makeEmptyEndpoints() + e.Subsets = []corev1.EndpointSubset{{Addresses: []corev1.EndpointAddress{{IP: "1.1.1.1"}}}} + return e +} From 52176014c98c4ba536b03d965c5ec81b74847552 Mon Sep 17 00:00:00 2001 From: Nacho Cano Date: Tue, 21 May 2019 19:28:19 -0700 Subject: [PATCH 13/64] rollbacking changes --- .../v1alpha1/kafka_channel_defaults.go | 11 +- .../v1alpha1/kafka_channel_defaults_test.go | 134 ------------------ .../messaging/v1alpha1/kafka_channel_types.go | 15 -- .../v1alpha1/kafka_channel_validation.go | 34 ----- .../v1alpha1/kafka_channel_validation_test.go | 78 ---------- contrib/kafka/pkg/controller/provider.go | 1 - .../kafka/pkg/controller/reconcile_test.go | 1 - .../pkg/{reconciler => controller}/types.go | 15 +- contrib/kafka/pkg/controller/util.go | 12 +- contrib/kafka/pkg/controller/util_test.go | 4 +- 10 files changed, 12 insertions(+), 293 deletions(-) delete mode 100644 contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_defaults_test.go rename contrib/kafka/pkg/{reconciler => controller}/types.go (64%) diff --git a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_defaults.go 
b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_defaults.go index 0aa0909ad24..eecba31d091 100644 --- a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_defaults.go +++ b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_defaults.go @@ -18,7 +18,6 @@ package v1alpha1 import ( "context" - . "github.com/knative/eventing/contrib/kafka/pkg/reconciler" ) func (c *KafkaChannel) SetDefaults(ctx context.Context) { @@ -26,13 +25,5 @@ func (c *KafkaChannel) SetDefaults(ctx context.Context) { } func (cs *KafkaChannelSpec) SetDefaults(ctx context.Context) { - if cs.ConsumerMode == "" { - cs.ConsumerMode = ConsumerModeMultiplexConsumerValue - } - if cs.NumPartitions <= 0 { - cs.NumPartitions = DefaultNumPartitions - } - if cs.ReplicationFactor <= 0 { - cs.ReplicationFactor = DefaultReplicationFactor - } + // TODO: Nothing to default here... } diff --git a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_defaults_test.go b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_defaults_test.go deleted file mode 100644 index 601f6acbbaf..00000000000 --- a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_defaults_test.go +++ /dev/null @@ -1,134 +0,0 @@ -/* -Copyright 2019 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "context" - "testing" - - "github.com/google/go-cmp/cmp" - . 
"github.com/knative/eventing/contrib/kafka/pkg/reconciler" -) - -const ( - testNumPartitions = 10 - testReplicationFactor = 5 -) - -func TestKafkaChannelDefaults(t *testing.T) { - testCases := map[string]struct { - initial KafkaChannel - expected KafkaChannel - }{ - "nil spec": { - initial: KafkaChannel{}, - expected: KafkaChannel{ - Spec: KafkaChannelSpec{ - ConsumerMode: ConsumerModeMultiplexConsumerValue, - NumPartitions: DefaultNumPartitions, - ReplicationFactor: DefaultReplicationFactor, - }, - }, - }, - "consumerMode empty": { - initial: KafkaChannel{ - Spec: KafkaChannelSpec{ - ConsumerMode: "", - NumPartitions: testNumPartitions, - ReplicationFactor: testReplicationFactor, - }, - }, - expected: KafkaChannel{ - Spec: KafkaChannelSpec{ - ConsumerMode: ConsumerModeMultiplexConsumerValue, - NumPartitions: testNumPartitions, - ReplicationFactor: testReplicationFactor, - }, - }, - }, - "numPartitions not set": { - initial: KafkaChannel{ - Spec: KafkaChannelSpec{ - ConsumerMode: ConsumerModeMultiplexConsumerValue, - ReplicationFactor: testReplicationFactor, - }, - }, - expected: KafkaChannel{ - Spec: KafkaChannelSpec{ - ConsumerMode: ConsumerModeMultiplexConsumerValue, - NumPartitions: DefaultNumPartitions, - ReplicationFactor: testReplicationFactor, - }, - }, - }, - "numPartitions negative": { - initial: KafkaChannel{ - Spec: KafkaChannelSpec{ - ConsumerMode: ConsumerModeMultiplexConsumerValue, - ReplicationFactor: testReplicationFactor, - NumPartitions: -10, - }, - }, - expected: KafkaChannel{ - Spec: KafkaChannelSpec{ - ConsumerMode: ConsumerModeMultiplexConsumerValue, - NumPartitions: DefaultNumPartitions, - ReplicationFactor: testReplicationFactor, - }, - }, - }, - "replicationFactor not set": { - initial: KafkaChannel{ - Spec: KafkaChannelSpec{ - ConsumerMode: ConsumerModeMultiplexConsumerValue, - NumPartitions: testNumPartitions, - }, - }, - expected: KafkaChannel{ - Spec: KafkaChannelSpec{ - ConsumerMode: ConsumerModeMultiplexConsumerValue, - 
NumPartitions: testNumPartitions, - ReplicationFactor: DefaultReplicationFactor, - }, - }, - }, - "replicationFactor negative": { - initial: KafkaChannel{ - Spec: KafkaChannelSpec{ - ConsumerMode: ConsumerModeMultiplexConsumerValue, - NumPartitions: testNumPartitions, - ReplicationFactor: -10, - }, - }, - expected: KafkaChannel{ - Spec: KafkaChannelSpec{ - ConsumerMode: ConsumerModeMultiplexConsumerValue, - NumPartitions: testNumPartitions, - ReplicationFactor: DefaultReplicationFactor, - }, - }, - }, - } - for n, tc := range testCases { - t.Run(n, func(t *testing.T) { - tc.initial.SetDefaults(context.TODO()) - if diff := cmp.Diff(tc.expected, tc.initial); diff != "" { - t.Fatalf("Unexpected defaults (-want, +got): %s", diff) - } - }) - } -} diff --git a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_types.go b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_types.go index 2fa71aa17c3..3e685c9ddfb 100644 --- a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_types.go +++ b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_types.go @@ -52,21 +52,6 @@ var _ webhook.GenericCRD = (*KafkaChannel)(nil) // KafkaChannelSpec defines the specification for a KafkaChannel. type KafkaChannelSpec struct { - // Comma-separated list of the Broker URL of the Kafka cluster, which is in the format - // of my-cluster-kafka-bootstrap.my-kafka-namespace:9092. - BootstrapServers string `json:"bootstrapServers"` - - // ConsumerMode is the mode used to dispatch events from different partitions in parallel. - // By default, partitions are multiplexed with a single go channel (multiplex). - // `multiplex` and `partitions` are valid values. - ConsumerMode string `json:"consumerMode"` - - // NumPartitions is the number of partitions of a Kafka topic. By default, it is set to 1. - NumPartitions int32 `json:"numPartitions"` - - // ReplicationFactor is the replication factor of a Kafka topic. By default, it is set to 1. 
- ReplicationFactor int16 `json:"replicationFactor"` - // KafkaChannel conforms to Duck type Subscribable. Subscribable *eventingduck.Subscribable `json:"subscribable,omitempty"` } diff --git a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation.go b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation.go index 010c4c94434..015f6db8d3c 100644 --- a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation.go +++ b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation.go @@ -19,9 +19,6 @@ package v1alpha1 import ( "context" "fmt" - . "github.com/knative/eventing/contrib/kafka/pkg/reconciler" - "strings" - "github.com/knative/pkg/apis" ) @@ -32,37 +29,6 @@ func (c *KafkaChannel) Validate(ctx context.Context) *apis.FieldError { func (cs *KafkaChannelSpec) Validate(ctx context.Context) *apis.FieldError { var errs *apis.FieldError - if cs.BootstrapServers == "" { - fe := apis.ErrMissingField("bootstrapServers") - errs = errs.Also(fe) - } else { - bootstrapServers := strings.Split(cs.BootstrapServers, ",") - for i, s := range bootstrapServers { - if len(s) == 0 { - fe := apis.ErrMissingField(fmt.Sprintf("bootstrapServers[%d]", i)) - errs = errs.Also(fe) - } - } - } - - if cs.ConsumerMode == "" { - fe := apis.ErrMissingField("consumerMode") - errs = errs.Also(fe) - } else if cs.ConsumerMode != ConsumerModePartitionConsumerValue && cs.ConsumerMode != ConsumerModeMultiplexConsumerValue { - fe := apis.ErrInvalidValue(cs.ConsumerMode, "consumerMode") - errs = errs.Also(fe) - } - - if cs.NumPartitions < 0 { - fe := apis.ErrInvalidValue(cs.NumPartitions, "numPartitions") - errs = errs.Also(fe) - } - - if cs.ReplicationFactor < 0 { - fe := apis.ErrInvalidValue(cs.ReplicationFactor, "replicationFactor") - errs = errs.Also(fe) - } - if cs.Subscribable != nil { for i, subscriber := range cs.Subscribable.Subscribers { if subscriber.ReplyURI == "" && subscriber.SubscriberURI == "" { diff --git 
a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation_test.go b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation_test.go index f836c9161fc..468019cd99b 100644 --- a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation_test.go +++ b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation_test.go @@ -40,83 +40,9 @@ func TestKafkaChannelValidation(t *testing.T) { return fe }(), }, - "empty bootstrapServers": { - cr: &KafkaChannel{ - Spec: KafkaChannelSpec{ - ConsumerMode: "multiplex", - }, - }, - want: func() *apis.FieldError { - fe := apis.ErrMissingField("spec.bootstrapServers") - return fe - }(), - }, - "empty bootstrapServer at index 1": { - cr: &KafkaChannel{ - Spec: KafkaChannelSpec{ - BootstrapServers: "bootstrap_srvs,", - ConsumerMode: "multiplex", - }, - }, - want: func() *apis.FieldError { - fe := apis.ErrMissingField("spec.bootstrapServers[1]") - return fe - }(), - }, - "empty consumerMode": { - cr: &KafkaChannel{ - Spec: KafkaChannelSpec{ - BootstrapServers: "bootstrap_srvs", - }, - }, - want: func() *apis.FieldError { - fe := apis.ErrMissingField("spec.consumerMode") - return fe - }(), - }, - "invalid consumerMode": { - cr: &KafkaChannel{ - Spec: KafkaChannelSpec{ - ConsumerMode: "invalid_value", - BootstrapServers: "bootstrap_srvs", - }, - }, - want: func() *apis.FieldError { - fe := apis.ErrInvalidValue("invalid_value", "spec.consumerMode") - return fe - }(), - }, - "negative numPartitions": { - cr: &KafkaChannel{ - Spec: KafkaChannelSpec{ - ConsumerMode: "multiplex", - BootstrapServers: "bootstrap_srvs", - NumPartitions: -10, - }, - }, - want: func() *apis.FieldError { - fe := apis.ErrInvalidValue(-10, "spec.numPartitions") - return fe - }(), - }, - "negative replicationFactor": { - cr: &KafkaChannel{ - Spec: KafkaChannelSpec{ - ConsumerMode: "multiplex", - BootstrapServers: "bootstrap_srvs", - ReplicationFactor: -10, - }, - }, - want: func() *apis.FieldError { - fe := apis.ErrInvalidValue(-10, 
"spec.replicationFactor") - return fe - }(), - }, "valid subscribers array": { cr: &KafkaChannel{ Spec: KafkaChannelSpec{ - ConsumerMode: "multiplex", - BootstrapServers: "bootstrap_srvs", Subscribable: &eventingduck.Subscribable{ Subscribers: []eventingduck.ChannelSubscriberSpec{{ SubscriberURI: "subscriberendpoint", @@ -129,8 +55,6 @@ func TestKafkaChannelValidation(t *testing.T) { "empty subscriber at index 1": { cr: &KafkaChannel{ Spec: KafkaChannelSpec{ - ConsumerMode: "multiplex", - BootstrapServers: "bootstrap_srvs", Subscribable: &eventingduck.Subscribable{ Subscribers: []eventingduck.ChannelSubscriberSpec{{ SubscriberURI: "subscriberendpoint", @@ -147,8 +71,6 @@ func TestKafkaChannelValidation(t *testing.T) { "two empty subscribers": { cr: &KafkaChannel{ Spec: KafkaChannelSpec{ - ConsumerMode: "multiplex", - BootstrapServers: "bootstrap_srvs", Subscribable: &eventingduck.Subscribable{ Subscribers: []eventingduck.ChannelSubscriberSpec{{}, {}}, }, diff --git a/contrib/kafka/pkg/controller/provider.go b/contrib/kafka/pkg/controller/provider.go index 092c6c01dfb..0f6ca5631f2 100644 --- a/contrib/kafka/pkg/controller/provider.go +++ b/contrib/kafka/pkg/controller/provider.go @@ -27,7 +27,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" - . "github.com/knative/eventing/contrib/kafka/pkg/reconciler" "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" ) diff --git a/contrib/kafka/pkg/controller/reconcile_test.go b/contrib/kafka/pkg/controller/reconcile_test.go index c7508c8a0fc..6e69b6aa57a 100644 --- a/contrib/kafka/pkg/controller/reconcile_test.go +++ b/contrib/kafka/pkg/controller/reconcile_test.go @@ -21,7 +21,6 @@ import ( "fmt" "testing" - . 
"github.com/knative/eventing/contrib/kafka/pkg/reconciler" eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" "github.com/knative/eventing/pkg/provisioners" controllertesting "github.com/knative/eventing/pkg/reconciler/testing" diff --git a/contrib/kafka/pkg/reconciler/types.go b/contrib/kafka/pkg/controller/types.go similarity index 64% rename from contrib/kafka/pkg/reconciler/types.go rename to contrib/kafka/pkg/controller/types.go index fea84c8b0db..3c2daff9964 100644 --- a/contrib/kafka/pkg/reconciler/types.go +++ b/contrib/kafka/pkg/controller/types.go @@ -14,20 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. */ -package reconciler +package controller -import "github.com/bsm/sarama-cluster" - -const ( - ConsumerModePartitionConsumerValue = "partitions" - ConsumerModeMultiplexConsumerValue = "multiplex" - - // DefaultNumPartitions defines the default number of partitions - DefaultNumPartitions = 1 - - // DefaultReplicationFactor defines the default number of replications - DefaultReplicationFactor = 1 -) +import cluster "github.com/bsm/sarama-cluster" type KafkaProvisionerConfig struct { Brokers []string diff --git a/contrib/kafka/pkg/controller/util.go b/contrib/kafka/pkg/controller/util.go index 5450f890f2d..7b3a56d448e 100644 --- a/contrib/kafka/pkg/controller/util.go +++ b/contrib/kafka/pkg/controller/util.go @@ -5,15 +5,17 @@ import ( "log" "strings" - "github.com/bsm/sarama-cluster" - . "github.com/knative/eventing/contrib/kafka/pkg/reconciler" + cluster "github.com/bsm/sarama-cluster" + "github.com/knative/pkg/configmap" ) const ( - BrokerConfigMapKey = "bootstrap_servers" - ConsumerModeConfigMapKey = "consumer_mode" - KafkaChannelSeparator = "." + BrokerConfigMapKey = "bootstrap_servers" + ConsumerModeConfigMapKey = "consumer_mode" + ConsumerModePartitionConsumerValue = "partitions" + ConsumerModeMultiplexConsumerValue = "multiplex" + KafkaChannelSeparator = "." 
) // GetProvisionerConfig returns the details of the associated ClusterChannelProvisioner object diff --git a/contrib/kafka/pkg/controller/util_test.go b/contrib/kafka/pkg/controller/util_test.go index 8ab18d63855..526f183bd86 100644 --- a/contrib/kafka/pkg/controller/util_test.go +++ b/contrib/kafka/pkg/controller/util_test.go @@ -6,9 +6,9 @@ import ( "path/filepath" "testing" - "github.com/bsm/sarama-cluster" + cluster "github.com/bsm/sarama-cluster" + "github.com/google/go-cmp/cmp" - . "github.com/knative/eventing/contrib/kafka/pkg/reconciler" _ "github.com/knative/pkg/system/testing" ) From 881a3fbe78233568953549327eb1fed8abe34589 Mon Sep 17 00:00:00 2001 From: Nacho Cano Date: Tue, 21 May 2019 19:33:44 -0700 Subject: [PATCH 14/64] more rollbacks --- contrib/kafka/pkg/controller/channel/provider.go | 5 ++--- contrib/kafka/pkg/controller/channel/reconcile.go | 11 ++++++++--- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/contrib/kafka/pkg/controller/channel/provider.go b/contrib/kafka/pkg/controller/channel/provider.go index ee5407ea90c..73eab2e8d22 100644 --- a/contrib/kafka/pkg/controller/channel/provider.go +++ b/contrib/kafka/pkg/controller/channel/provider.go @@ -30,7 +30,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/source" common "github.com/knative/eventing/contrib/kafka/pkg/controller" - . "github.com/knative/eventing/contrib/kafka/pkg/reconciler" eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" "github.com/knative/pkg/system" ) @@ -52,7 +51,7 @@ type reconciler struct { client client.Client recorder record.EventRecorder logger *zap.Logger - config *KafkaProvisionerConfig + config *common.KafkaProvisionerConfig // Using a shared kafkaClusterAdmin does not work currently because of an issue with // Shopify/sarama, see https://github.com/Shopify/sarama/issues/1162. 
kafkaClusterAdmin sarama.ClusterAdmin @@ -62,7 +61,7 @@ type reconciler struct { var _ reconcile.Reconciler = &reconciler{} // ProvideController returns a Channel controller. -func ProvideController(mgr manager.Manager, config *KafkaProvisionerConfig, logger *zap.Logger) (controller.Controller, error) { +func ProvideController(mgr manager.Manager, config *common.KafkaProvisionerConfig, logger *zap.Logger) (controller.Controller, error) { // Setup a new controller to Reconcile Channel. c, err := controller.New(controllerAgentName, mgr, controller.Options{ Reconciler: &reconciler{ diff --git a/contrib/kafka/pkg/controller/channel/reconcile.go b/contrib/kafka/pkg/controller/channel/reconcile.go index 20538c6de7e..34a7e1c9b71 100644 --- a/contrib/kafka/pkg/controller/channel/reconcile.go +++ b/contrib/kafka/pkg/controller/channel/reconcile.go @@ -23,13 +23,12 @@ import ( "github.com/Shopify/sarama" "go.uber.org/zap" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/knative/eventing/contrib/kafka/pkg/controller" - . 
"github.com/knative/eventing/contrib/kafka/pkg/reconciler" eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" util "github.com/knative/eventing/pkg/provisioners" topicUtils "github.com/knative/eventing/pkg/provisioners/utils" @@ -39,6 +38,12 @@ import ( const ( finalizerName = controllerAgentName + // DefaultNumPartitions defines the default number of partitions + DefaultNumPartitions = 1 + + // DefaultReplicationFactor defines the default number of replications + DefaultReplicationFactor = 1 + // Name of the corev1.Events emitted from the reconciliation process dispatcherReconcileFailed = "DispatcherReconcileFailed" dispatcherUpdateStatusFailed = "DispatcherUpdateStatusFailed" @@ -241,7 +246,7 @@ func (r *reconciler) getClusterChannelProvisioner() (*eventingv1alpha1.ClusterCh return clusterChannelProvisioner, nil } -func createKafkaAdminClient(config *KafkaProvisionerConfig) (sarama.ClusterAdmin, error) { +func createKafkaAdminClient(config *controller.KafkaProvisionerConfig) (sarama.ClusterAdmin, error) { saramaConf := sarama.NewConfig() saramaConf.Version = sarama.V1_1_0_0 saramaConf.ClientID = controllerAgentName From 07c383a38cf2603fcf0ea27c0c78500bc38b0844 Mon Sep 17 00:00:00 2001 From: Nacho Cano Date: Tue, 21 May 2019 19:35:04 -0700 Subject: [PATCH 15/64] rollback controller main --- contrib/kafka/cmd/controller/main.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/contrib/kafka/cmd/controller/main.go b/contrib/kafka/cmd/controller/main.go index 041a0a3079d..f50f285f326 100644 --- a/contrib/kafka/cmd/controller/main.go +++ b/contrib/kafka/cmd/controller/main.go @@ -25,7 +25,6 @@ import ( provisionerController "github.com/knative/eventing/contrib/kafka/pkg/controller" "github.com/knative/eventing/contrib/kafka/pkg/controller/channel" - . 
"github.com/knative/eventing/contrib/kafka/pkg/reconciler" eventingv1alpha "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" "github.com/knative/eventing/pkg/provisioners" "go.uber.org/zap" @@ -41,7 +40,7 @@ import ( type SchemeFunc func(*runtime.Scheme) error // ProvideFunc adds a controller to a Manager. -type ProvideFunc func(mgr manager.Manager, config *KafkaProvisionerConfig, logger *zap.Logger) (controller.Controller, error) +type ProvideFunc func(mgr manager.Manager, config *provisionerController.KafkaProvisionerConfig, logger *zap.Logger) (controller.Controller, error) func main() { os.Exit(_main()) From a7cd931a05e03c82baab05b678ee5edd681f2f90 Mon Sep 17 00:00:00 2001 From: Nacho Cano Date: Tue, 21 May 2019 20:21:46 -0700 Subject: [PATCH 16/64] moving things down --- contrib/kafka/cmd/channel_controller/main.go | 8 ++++ contrib/kafka/cmd/controller/main.go | 5 ++- contrib/kafka/cmd/dispatcher/main.go | 4 +- .../kafka/pkg/controller/channel/provider.go | 5 ++- .../kafka/pkg/controller/channel/reconcile.go | 7 ++-- .../pkg/controller/channel/reconcile_test.go | 6 +-- contrib/kafka/pkg/controller/provider.go | 5 ++- .../kafka/pkg/controller/reconcile_test.go | 5 ++- contrib/kafka/pkg/controller/types.go | 24 ------------ contrib/kafka/pkg/dispatcher/dispatcher.go | 5 ++- .../pkg/reconciler/controller/kafkachannel.go | 8 +++- .../controller/resources/service.go | 11 +++--- .../controller/resources/service_test.go | 4 +- .../reconciler/controller/resources/topic.go | 22 ++++++++++- .../kafka/pkg/{controller => utils}/util.go | 37 +++++++++++++++---- .../pkg/{controller => utils}/util_test.go | 34 ++++++++++++----- 16 files changed, 121 insertions(+), 69 deletions(-) delete mode 100644 contrib/kafka/pkg/controller/types.go rename contrib/kafka/pkg/{controller => utils}/util.go (53%) rename contrib/kafka/pkg/{controller => utils}/util_test.go (79%) diff --git a/contrib/kafka/cmd/channel_controller/main.go b/contrib/kafka/cmd/channel_controller/main.go 
index cfca19296b0..fb5205b154e 100644 --- a/contrib/kafka/cmd/channel_controller/main.go +++ b/contrib/kafka/cmd/channel_controller/main.go @@ -18,6 +18,7 @@ package main import ( "flag" + "github.com/knative/eventing/contrib/kafka/pkg/utils" "log" // Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters). // _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" @@ -62,6 +63,12 @@ func main() { logger.Fatalw("Error building kubeconfig", zap.Error(err)) } + // TODO the underlying config map needs to be watched and the config should be reloaded if there is a change. + kafkaConfig, err := utils.GetKafkaConfig("/etc/config-provisioner") + if err != nil { + logger.Fatalw("Error loading kafka config", zap.Error(err)) + } + logger = logger.With(zap.String("controller/impl", "pkg")) logger.Info("Starting the Kafka controller") @@ -92,6 +99,7 @@ func main() { controller.NewController( opt, eventingClientSet, + kafkaConfig, systemNS, dispatcherDeploymentName, dispatcherServiceName, diff --git a/contrib/kafka/cmd/controller/main.go b/contrib/kafka/cmd/controller/main.go index f50f285f326..cbf42e3aa30 100644 --- a/contrib/kafka/cmd/controller/main.go +++ b/contrib/kafka/cmd/controller/main.go @@ -18,6 +18,7 @@ package main import ( "flag" + "github.com/knative/eventing/contrib/kafka/pkg/utils" "os" // Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters). @@ -40,7 +41,7 @@ import ( type SchemeFunc func(*runtime.Scheme) error // ProvideFunc adds a controller to a Manager. 
-type ProvideFunc func(mgr manager.Manager, config *provisionerController.KafkaProvisionerConfig, logger *zap.Logger) (controller.Controller, error) +type ProvideFunc func(mgr manager.Manager, config *utils.KafkaConfig, logger *zap.Logger) (controller.Controller, error) func main() { os.Exit(_main()) @@ -76,7 +77,7 @@ func _main() int { } // TODO the underlying config map needs to be watched and the config should be reloaded if there is a change. - provisionerConfig, err := provisionerController.GetProvisionerConfig("/etc/config-provisioner") + provisionerConfig, err := utils.GetKafkaConfig("/etc/config-provisioner") if err != nil { logger.Error(err, "unable to run controller manager") diff --git a/contrib/kafka/cmd/dispatcher/main.go b/contrib/kafka/cmd/dispatcher/main.go index b9d268d48d9..da004665bb7 100644 --- a/contrib/kafka/cmd/dispatcher/main.go +++ b/contrib/kafka/cmd/dispatcher/main.go @@ -18,10 +18,10 @@ package main import ( "flag" + "github.com/knative/eventing/contrib/kafka/pkg/utils" "log" "github.com/knative/eventing/contrib/kafka/pkg/controller" - provisionerController "github.com/knative/eventing/contrib/kafka/pkg/controller" "github.com/knative/eventing/contrib/kafka/pkg/dispatcher" "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" "github.com/knative/eventing/pkg/channelwatcher" @@ -41,7 +41,7 @@ func main() { if err != nil { log.Fatalf("unable to create logger: %v", err) } - provisionerConfig, err := provisionerController.GetProvisionerConfig("/etc/config-provisioner") + provisionerConfig, err := utils.GetKafkaConfig("/etc/config-provisioner") if err != nil { logger.Fatal("unable to load provisioner config", zap.Error(err)) } diff --git a/contrib/kafka/pkg/controller/channel/provider.go b/contrib/kafka/pkg/controller/channel/provider.go index 73eab2e8d22..b55a956c9a1 100644 --- a/contrib/kafka/pkg/controller/channel/provider.go +++ b/contrib/kafka/pkg/controller/channel/provider.go @@ -30,6 +30,7 @@ import ( 
"sigs.k8s.io/controller-runtime/pkg/source" common "github.com/knative/eventing/contrib/kafka/pkg/controller" + "github.com/knative/eventing/contrib/kafka/pkg/utils" eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" "github.com/knative/pkg/system" ) @@ -51,7 +52,7 @@ type reconciler struct { client client.Client recorder record.EventRecorder logger *zap.Logger - config *common.KafkaProvisionerConfig + config *utils.KafkaConfig // Using a shared kafkaClusterAdmin does not work currently because of an issue with // Shopify/sarama, see https://github.com/Shopify/sarama/issues/1162. kafkaClusterAdmin sarama.ClusterAdmin @@ -61,7 +62,7 @@ type reconciler struct { var _ reconcile.Reconciler = &reconciler{} // ProvideController returns a Channel controller. -func ProvideController(mgr manager.Manager, config *common.KafkaProvisionerConfig, logger *zap.Logger) (controller.Controller, error) { +func ProvideController(mgr manager.Manager, config *utils.KafkaConfig, logger *zap.Logger) (controller.Controller, error) { // Setup a new controller to Reconcile Channel. 
c, err := controller.New(controllerAgentName, mgr, controller.Options{ Reconciler: &reconciler{ diff --git a/contrib/kafka/pkg/controller/channel/reconcile.go b/contrib/kafka/pkg/controller/channel/reconcile.go index 34a7e1c9b71..ba7e271557d 100644 --- a/contrib/kafka/pkg/controller/channel/reconcile.go +++ b/contrib/kafka/pkg/controller/channel/reconcile.go @@ -20,6 +20,7 @@ import ( "context" "encoding/json" "fmt" + "github.com/knative/eventing/contrib/kafka/pkg/utils" "github.com/Shopify/sarama" "go.uber.org/zap" @@ -185,7 +186,7 @@ func (r *reconciler) shouldReconcile(channel *eventingv1alpha1.Channel, clusterC } func (r *reconciler) provisionChannel(channel *eventingv1alpha1.Channel, kafkaClusterAdmin sarama.ClusterAdmin) error { - topicName := topicUtils.TopicName(controller.KafkaChannelSeparator, channel.Namespace, channel.Name) + topicName := topicUtils.TopicName(utils.KafkaChannelSeparator, channel.Namespace, channel.Name) r.logger.Info("creating topic on kafka cluster", zap.String("topic", topicName)) var arguments channelArgs @@ -221,7 +222,7 @@ func (r *reconciler) provisionChannel(channel *eventingv1alpha1.Channel, kafkaCl } func (r *reconciler) deprovisionChannel(channel *eventingv1alpha1.Channel, kafkaClusterAdmin sarama.ClusterAdmin) error { - topicName := topicUtils.TopicName(controller.KafkaChannelSeparator, channel.Namespace, channel.Name) + topicName := topicUtils.TopicName(utils.KafkaChannelSeparator, channel.Namespace, channel.Name) r.logger.Info("deleting topic on kafka cluster", zap.String("topic", topicName)) err := kafkaClusterAdmin.DeleteTopic(topicName) @@ -246,7 +247,7 @@ func (r *reconciler) getClusterChannelProvisioner() (*eventingv1alpha1.ClusterCh return clusterChannelProvisioner, nil } -func createKafkaAdminClient(config *controller.KafkaProvisionerConfig) (sarama.ClusterAdmin, error) { +func createKafkaAdminClient(config *utils.KafkaConfig) (sarama.ClusterAdmin, error) { saramaConf := sarama.NewConfig() saramaConf.Version = 
sarama.V1_1_0_0 saramaConf.ClientID = controllerAgentName diff --git a/contrib/kafka/pkg/controller/channel/reconcile_test.go b/contrib/kafka/pkg/controller/channel/reconcile_test.go index 02836e06a54..036f058536b 100644 --- a/contrib/kafka/pkg/controller/channel/reconcile_test.go +++ b/contrib/kafka/pkg/controller/channel/reconcile_test.go @@ -26,7 +26,7 @@ import ( "github.com/Shopify/sarama" "github.com/google/go-cmp/cmp" - "github.com/knative/eventing/contrib/kafka/pkg/controller" + . "github.com/knative/eventing/contrib/kafka/pkg/utils" eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" "github.com/knative/eventing/pkg/provisioners" util "github.com/knative/eventing/pkg/provisioners" @@ -531,8 +531,8 @@ func om(namespace, name string) metav1.ObjectMeta { } } -func getControllerConfig() *controller.KafkaProvisionerConfig { - return &controller.KafkaProvisionerConfig{ +func getControllerConfig() *KafkaConfig { + return &KafkaConfig{ Brokers: []string{"test-broker"}, } } diff --git a/contrib/kafka/pkg/controller/provider.go b/contrib/kafka/pkg/controller/provider.go index 0f6ca5631f2..2e0dec6a031 100644 --- a/contrib/kafka/pkg/controller/provider.go +++ b/contrib/kafka/pkg/controller/provider.go @@ -17,6 +17,7 @@ limitations under the License. package controller import ( + "github.com/knative/eventing/contrib/kafka/pkg/utils" "go.uber.org/zap" corev1 "k8s.io/api/core/v1" "k8s.io/client-go/tools/record" @@ -45,14 +46,14 @@ type reconciler struct { client client.Client recorder record.EventRecorder logger *zap.Logger - config *KafkaProvisionerConfig + config *utils.KafkaConfig } // Verify the struct implements reconcile.Reconciler var _ reconcile.Reconciler = &reconciler{} // ProvideController returns a Provisioner controller. 
-func ProvideController(mgr manager.Manager, config *KafkaProvisionerConfig, logger *zap.Logger) (controller.Controller, error) { +func ProvideController(mgr manager.Manager, config *utils.KafkaConfig, logger *zap.Logger) (controller.Controller, error) { // Setup a new controller to Reconcile Provisioners. c, err := controller.New(controllerAgentName, mgr, controller.Options{ Reconciler: &reconciler{ diff --git a/contrib/kafka/pkg/controller/reconcile_test.go b/contrib/kafka/pkg/controller/reconcile_test.go index 6e69b6aa57a..72c3b63fcc4 100644 --- a/contrib/kafka/pkg/controller/reconcile_test.go +++ b/contrib/kafka/pkg/controller/reconcile_test.go @@ -19,6 +19,7 @@ package controller import ( "context" "fmt" + "github.com/knative/eventing/contrib/kafka/pkg/utils" "testing" eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" @@ -195,8 +196,8 @@ func om(namespace, name string) metav1.ObjectMeta { } } -func getControllerConfig() *KafkaProvisionerConfig { - return &KafkaProvisionerConfig{ +func getControllerConfig() *utils.KafkaConfig { + return &utils.KafkaConfig{ Brokers: []string{"test-broker"}, } } diff --git a/contrib/kafka/pkg/controller/types.go b/contrib/kafka/pkg/controller/types.go deleted file mode 100644 index 3c2daff9964..00000000000 --- a/contrib/kafka/pkg/controller/types.go +++ /dev/null @@ -1,24 +0,0 @@ -/* -Copyright 2019 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package controller - -import cluster "github.com/bsm/sarama-cluster" - -type KafkaProvisionerConfig struct { - Brokers []string - ConsumerMode cluster.ConsumerMode -} diff --git a/contrib/kafka/pkg/dispatcher/dispatcher.go b/contrib/kafka/pkg/dispatcher/dispatcher.go index 7edaf20ef6d..47cde09d3fe 100644 --- a/contrib/kafka/pkg/dispatcher/dispatcher.go +++ b/contrib/kafka/pkg/dispatcher/dispatcher.go @@ -18,6 +18,7 @@ package dispatcher import ( "errors" "fmt" + "github.com/knative/eventing/contrib/kafka/pkg/utils" "sync" "sync/atomic" @@ -198,7 +199,7 @@ func (d *KafkaDispatcher) Start(stopCh <-chan struct{}) error { func (d *KafkaDispatcher) subscribe(channelRef provisioners.ChannelReference, sub subscription) error { d.logger.Info("Subscribing", zap.Any("channelRef", channelRef), zap.Any("subscription", sub)) - topicName := topicUtils.TopicName(controller.KafkaChannelSeparator, channelRef.Namespace, channelRef.Name) + topicName := topicUtils.TopicName(utils.KafkaChannelSeparator, channelRef.Namespace, channelRef.Name) group := fmt.Sprintf("%s.%s", controller.Name, sub.UID) consumer, err := d.kafkaCluster.NewConsumer(group, []string{topicName}) @@ -362,7 +363,7 @@ func fromKafkaMessage(kafkaMessage *sarama.ConsumerMessage) *provisioners.Messag func toKafkaMessage(channel provisioners.ChannelReference, message *provisioners.Message) *sarama.ProducerMessage { kafkaMessage := sarama.ProducerMessage{ - Topic: topicUtils.TopicName(controller.KafkaChannelSeparator, channel.Namespace, channel.Name), + Topic: topicUtils.TopicName(utils.KafkaChannelSeparator, channel.Namespace, channel.Name), Value: sarama.ByteEncoder(message.Payload), } for h, v := range message.Headers { diff --git a/contrib/kafka/pkg/reconciler/controller/kafkachannel.go b/contrib/kafka/pkg/reconciler/controller/kafkachannel.go index 70a67921c54..7eda435a4a7 100644 --- a/contrib/kafka/pkg/reconciler/controller/kafkachannel.go +++ b/contrib/kafka/pkg/reconciler/controller/kafkachannel.go @@ -20,9 
+20,9 @@ import ( "context" "encoding/json" "fmt" + "github.com/knative/eventing/contrib/kafka/pkg/utils" "github.com/knative/eventing/pkg/reconciler/names" "reflect" - "strings" "time" "github.com/Shopify/sarama" @@ -72,6 +72,8 @@ type Reconciler struct { dispatcherDeploymentName string dispatcherServiceName string + kafkaConfig *utils.KafkaConfig + // Using a shared kafkaClusterAdmin does not work currently because of an issue with // Shopify/sarama, see https://github.com/Shopify/sarama/issues/1162. kafkaClusterAdmin sarama.ClusterAdmin @@ -101,6 +103,7 @@ var _ cache.ResourceEventHandler = (*Reconciler)(nil) func NewController( opt reconciler.Options, eventingClientSet *versioned.Clientset, + kafkaConfig *utils.KafkaConfig, dispatcherNamespace string, dispatcherDeploymentName string, dispatcherServiceName string, @@ -115,6 +118,7 @@ func NewController( dispatcherNamespace: dispatcherNamespace, dispatcherDeploymentName: dispatcherDeploymentName, dispatcherServiceName: dispatcherServiceName, + kafkaConfig: kafkaConfig, eventingClientSet: eventingClientSet, kafkachannelLister: kafkachannelInformer.Lister(), kafkachannelInformer: kafkachannelInformer.Informer(), @@ -381,7 +385,7 @@ func (r *Reconciler) createClient(ctx context.Context, kc *v1alpha1.KafkaChannel var err error args := &resources.ClientArgs{ ClientID: controllerAgentName, - BootstrapServers: strings.Split(kc.Spec.BootstrapServers, ","), + BootstrapServers: r.kafkaConfig.Brokers, } kafkaClusterAdmin, err = resources.MakeClient(args) if err != nil { diff --git a/contrib/kafka/pkg/reconciler/controller/resources/service.go b/contrib/kafka/pkg/reconciler/controller/resources/service.go index b9d6ba72adb..2161b20f21e 100644 --- a/contrib/kafka/pkg/reconciler/controller/resources/service.go +++ b/contrib/kafka/pkg/reconciler/controller/resources/service.go @@ -27,10 +27,11 @@ import ( ) const ( - portName = "http" - portNumber = 80 - MessagingRoleLabel = "messaging.knative.dev/role" - MessagingRole = 
"kafka-channel" + portName = "http" + portNumber = 80 + // TODO messaging instead? + EventingRoleLabel = "eventing.knative.dev/role" + EventingRole = "kafka-channel" ) // ServiceOption can be used to optionally modify the K8s service in MakeK8sService. @@ -70,7 +71,7 @@ func MakeK8sService(kc *v1alpha1.KafkaChannel, opts ...ServiceOption) (*corev1.S Name: MakeChannelServiceName(kc.ObjectMeta.Name), Namespace: kc.Namespace, Labels: map[string]string{ - MessagingRoleLabel: MessagingRole, + EventingRoleLabel: EventingRole, }, OwnerReferences: []metav1.OwnerReference{ *kmeta.NewControllerRef(kc), diff --git a/contrib/kafka/pkg/reconciler/controller/resources/service_test.go b/contrib/kafka/pkg/reconciler/controller/resources/service_test.go index 66dac7ff372..f6f033e6ed1 100644 --- a/contrib/kafka/pkg/reconciler/controller/resources/service_test.go +++ b/contrib/kafka/pkg/reconciler/controller/resources/service_test.go @@ -64,7 +64,7 @@ func TestMakeService(t *testing.T) { Name: fmt.Sprintf("%s-kn-channel", kcName), Namespace: testNS, Labels: map[string]string{ - MessagingRoleLabel: MessagingRole, + EventingRoleLabel: EventingRole, }, OwnerReferences: []metav1.OwnerReference{ *kmeta.NewControllerRef(imc), @@ -107,7 +107,7 @@ func TestMakeServiceWithExternal(t *testing.T) { Name: fmt.Sprintf("%s-kn-channel", kcName), Namespace: testNS, Labels: map[string]string{ - MessagingRoleLabel: MessagingRole, + EventingRoleLabel: EventingRole, }, OwnerReferences: []metav1.OwnerReference{ *kmeta.NewControllerRef(imc), diff --git a/contrib/kafka/pkg/reconciler/controller/resources/topic.go b/contrib/kafka/pkg/reconciler/controller/resources/topic.go index 1871e469289..b257b8a38fe 100644 --- a/contrib/kafka/pkg/reconciler/controller/resources/topic.go +++ b/contrib/kafka/pkg/reconciler/controller/resources/topic.go @@ -1,3 +1,19 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance 
with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package resources import ( @@ -5,6 +21,10 @@ import ( "github.com/knative/eventing/contrib/kafka/pkg/apis/messaging/v1alpha1" ) +const ( + kafkaChannelPrefix = "knative-messaging-kafka-channel" +) + func MakeTopicName(channel *v1alpha1.KafkaChannel) string { - return fmt.Sprintf("%s.%s", channel.Namespace, channel.Name) + return fmt.Sprintf("%s.%s.%s", kafkaChannelPrefix, channel.Namespace, channel.Name) } diff --git a/contrib/kafka/pkg/controller/util.go b/contrib/kafka/pkg/utils/util.go similarity index 53% rename from contrib/kafka/pkg/controller/util.go rename to contrib/kafka/pkg/utils/util.go index 7b3a56d448e..9e5400b2fc4 100644 --- a/contrib/kafka/pkg/controller/util.go +++ b/contrib/kafka/pkg/utils/util.go @@ -1,4 +1,20 @@ -package controller +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package utils import ( "fmt" @@ -18,29 +34,34 @@ const ( KafkaChannelSeparator = "." 
) -// GetProvisionerConfig returns the details of the associated ClusterChannelProvisioner object -func GetProvisionerConfig(path string) (*KafkaProvisionerConfig, error) { +type KafkaConfig struct { + Brokers []string + ConsumerMode cluster.ConsumerMode +} + +// GetKafkaConfig returns the details of the Kafka cluster. +func GetKafkaConfig(path string) (*KafkaConfig, error) { configMap, err := configmap.Load(path) if err != nil { - return nil, fmt.Errorf("error loading provisioner configuration: %s", err) + return nil, fmt.Errorf("error loading configuration: %s", err) } if len(configMap) == 0 { - return nil, fmt.Errorf("missing provisioner configuration") + return nil, fmt.Errorf("missing configuration") } - config := &KafkaProvisionerConfig{} + config := &KafkaConfig{} if brokers, ok := configMap[BrokerConfigMapKey]; ok { bootstrapServers := strings.Split(brokers, ",") for _, s := range bootstrapServers { if len(s) == 0 { - return nil, fmt.Errorf("empty %s value in provisioner configuration", BrokerConfigMapKey) + return nil, fmt.Errorf("empty %s value in configuration", BrokerConfigMapKey) } } config.Brokers = bootstrapServers } else { - return nil, fmt.Errorf("missing key %s in provisioner configuration", BrokerConfigMapKey) + return nil, fmt.Errorf("missing key %s in configuration", BrokerConfigMapKey) } config.ConsumerMode = cluster.ConsumerModeMultiplex diff --git a/contrib/kafka/pkg/controller/util_test.go b/contrib/kafka/pkg/utils/util_test.go similarity index 79% rename from contrib/kafka/pkg/controller/util_test.go rename to contrib/kafka/pkg/utils/util_test.go index 526f183bd86..bcdd7c4fcad 100644 --- a/contrib/kafka/pkg/controller/util_test.go +++ b/contrib/kafka/pkg/utils/util_test.go @@ -1,4 +1,20 @@ -package controller +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package utils import ( "io/ioutil" @@ -12,14 +28,14 @@ import ( _ "github.com/knative/pkg/system/testing" ) -func TestGetProvisionerConfigBrokers(t *testing.T) { +func TestGetKafkaConfig(t *testing.T) { testCases := []struct { name string data map[string]string path string getError string - expected *KafkaProvisionerConfig + expected *KafkaConfig }{ { name: "invalid config path", @@ -44,21 +60,21 @@ func TestGetProvisionerConfigBrokers(t *testing.T) { { name: "single bootstrap_servers", data: map[string]string{"bootstrap_servers": "kafkabroker.kafka:9092"}, - expected: &KafkaProvisionerConfig{ + expected: &KafkaConfig{ Brokers: []string{"kafkabroker.kafka:9092"}, }, }, { name: "multiple bootstrap_servers", data: map[string]string{"bootstrap_servers": "kafkabroker1.kafka:9092,kafkabroker2.kafka:9092"}, - expected: &KafkaProvisionerConfig{ + expected: &KafkaConfig{ Brokers: []string{"kafkabroker1.kafka:9092", "kafkabroker2.kafka:9092"}, }, }, { name: "partition consumer", data: map[string]string{"bootstrap_servers": "kafkabroker.kafka:9092", "consumer_mode": "partitions"}, - expected: &KafkaProvisionerConfig{ + expected: &KafkaConfig{ Brokers: []string{"kafkabroker.kafka:9092"}, ConsumerMode: cluster.ConsumerModePartitions, }, @@ -66,7 +82,7 @@ func TestGetProvisionerConfigBrokers(t *testing.T) { { name: "default multiplex", data: map[string]string{"bootstrap_servers": "kafkabroker.kafka:9092", "consumer_mode": "multiplex"}, - expected: &KafkaProvisionerConfig{ + expected: &KafkaConfig{ Brokers: []string{"kafkabroker.kafka:9092"}, ConsumerMode: 
cluster.ConsumerModeMultiplex, }, @@ -74,7 +90,7 @@ func TestGetProvisionerConfigBrokers(t *testing.T) { { name: "default multiplex from invalid consumer_mode", data: map[string]string{"bootstrap_servers": "kafkabroker.kafka:9092", "consumer_mode": "foo"}, - expected: &KafkaProvisionerConfig{ + expected: &KafkaConfig{ Brokers: []string{"kafkabroker.kafka:9092"}, ConsumerMode: cluster.ConsumerModeMultiplex, }, @@ -103,7 +119,7 @@ func TestGetProvisionerConfigBrokers(t *testing.T) { tc.path = dir } - got, err := GetProvisionerConfig(tc.path) + got, err := GetKafkaConfig(tc.path) if tc.getError != "" { if err == nil { From 38a9b4506bc8ae642c6a98777e0bf05925062399 Mon Sep 17 00:00:00 2001 From: Nacho Cano Date: Tue, 21 May 2019 20:36:00 -0700 Subject: [PATCH 17/64] updating config map --- contrib/kafka/cmd/channel_controller/main.go | 2 +- contrib/kafka/config/400-kafka-config.yaml | 28 ++++++++++++++++++++ contrib/kafka/config/500-controller.yaml | 8 +++--- contrib/kafka/config/500-dispatcher.yaml | 6 ++--- 4 files changed, 36 insertions(+), 8 deletions(-) create mode 100644 contrib/kafka/config/400-kafka-config.yaml diff --git a/contrib/kafka/cmd/channel_controller/main.go b/contrib/kafka/cmd/channel_controller/main.go index fb5205b154e..5a7feed5862 100644 --- a/contrib/kafka/cmd/channel_controller/main.go +++ b/contrib/kafka/cmd/channel_controller/main.go @@ -64,7 +64,7 @@ func main() { } // TODO the underlying config map needs to be watched and the config should be reloaded if there is a change. 
- kafkaConfig, err := utils.GetKafkaConfig("/etc/config-provisioner") + kafkaConfig, err := utils.GetKafkaConfig("/etc/config-kafka") if err != nil { logger.Fatalw("Error loading kafka config", zap.Error(err)) } diff --git a/contrib/kafka/config/400-kafka-config.yaml b/contrib/kafka/config/400-kafka-config.yaml new file mode 100644 index 00000000000..a668d1d98a0 --- /dev/null +++ b/contrib/kafka/config/400-kafka-config.yaml @@ -0,0 +1,28 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-kafka + namespace: knative-eventing +data: + # Broker URL. Replace this with the URLs for your kafka cluster, + # which is in the format of my-cluster-kafka-bootstrap.my-kafka-namespace:9092. + bootstrap_servers: REPLACE_WITH_CLUSTER_URL + + # Consumer mode to dispatch events from different partitions in parallel. + # By default(multiplex), partitions are multiplexed with a single go channel. + # `multiplex` and `partitions` are valid values. 
+ ## consumer_mode: partitions \ No newline at end of file diff --git a/contrib/kafka/config/500-controller.yaml b/contrib/kafka/config/500-controller.yaml index e05c2293b41..ba40786ee2e 100644 --- a/contrib/kafka/config/500-controller.yaml +++ b/contrib/kafka/config/500-controller.yaml @@ -41,12 +41,12 @@ spec: volumeMounts: - name: config-logging mountPath: /etc/config-logging - - name: config-channel - mountPath: /etc/config-channel + - name: config-kafka + mountPath: /etc/config-kafka volumes: - name: config-logging configMap: name: config-logging - - name: config-channel + - name: config-kafka configMap: - name: kafka-channel-config + name: config-kafka diff --git a/contrib/kafka/config/500-dispatcher.yaml b/contrib/kafka/config/500-dispatcher.yaml index 4787808f34d..de65df71eea 100644 --- a/contrib/kafka/config/500-dispatcher.yaml +++ b/contrib/kafka/config/500-dispatcher.yaml @@ -37,9 +37,9 @@ spec: fieldRef: fieldPath: metadata.namespace volumeMounts: - - name: kafka-channel-controller-config + - name: kafka-config mountPath: /etc/config-provisioner volumes: - - name: kafka-channel-controller-config + - name: kafka-config configMap: - name: kafka-channel-controller-config + name: kafka-config From f89957212cbdd2d593c3dba6bbf15303f52cb905 Mon Sep 17 00:00:00 2001 From: Nacho Cano Date: Tue, 21 May 2019 21:43:10 -0700 Subject: [PATCH 18/64] setting defaults --- .../v1alpha1/kafka_channel_defaults.go | 8 +- .../v1alpha1/kafka_channel_defaults_test.go | 106 ++++++++++++++++++ .../messaging/v1alpha1/kafka_channel_types.go | 6 + .../v1alpha1/kafka_channel_validation.go | 10 ++ .../v1alpha1/kafka_channel_validation_test.go | 36 +++++- .../kafka/pkg/controller/channel/reconcile.go | 10 +- contrib/kafka/pkg/utils/util.go | 6 + 7 files changed, 172 insertions(+), 10 deletions(-) create mode 100644 contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_defaults_test.go diff --git a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_defaults.go 
b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_defaults.go index eecba31d091..6d6a025d6a9 100644 --- a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_defaults.go +++ b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_defaults.go @@ -18,6 +18,7 @@ package v1alpha1 import ( "context" + "github.com/knative/eventing/contrib/kafka/pkg/utils" ) func (c *KafkaChannel) SetDefaults(ctx context.Context) { @@ -25,5 +26,10 @@ func (c *KafkaChannel) SetDefaults(ctx context.Context) { } func (cs *KafkaChannelSpec) SetDefaults(ctx context.Context) { - // TODO: Nothing to default here... + if cs.NumPartitions <= 0 { + cs.NumPartitions = utils.DefaultNumPartitions + } + if cs.ReplicationFactor <= 0 { + cs.ReplicationFactor = utils.DefaultReplicationFactor + } } diff --git a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_defaults_test.go b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_defaults_test.go new file mode 100644 index 00000000000..5f2846aae0d --- /dev/null +++ b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_defaults_test.go @@ -0,0 +1,106 @@ +/* +Copyright 2019 The Knative Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + "context" + "github.com/knative/eventing/contrib/kafka/pkg/utils" + "testing" + + "github.com/google/go-cmp/cmp" +) + +const ( + testNumPartitions = 10 + testReplicationFactor = 5 +) + +func TestKafkaChannelDefaults(t *testing.T) { + testCases := map[string]struct { + initial KafkaChannel + expected KafkaChannel + }{ + "nil spec": { + initial: KafkaChannel{}, + expected: KafkaChannel{ + Spec: KafkaChannelSpec{ + NumPartitions: utils.DefaultNumPartitions, + ReplicationFactor: utils.DefaultReplicationFactor, + }, + }, + }, + "numPartitions not set": { + initial: KafkaChannel{ + Spec: KafkaChannelSpec{ + ReplicationFactor: testReplicationFactor, + }, + }, + expected: KafkaChannel{ + Spec: KafkaChannelSpec{ + NumPartitions: utils.DefaultNumPartitions, + ReplicationFactor: testReplicationFactor, + }, + }, + }, + "numPartitions negative": { + initial: KafkaChannel{ + Spec: KafkaChannelSpec{ + NumPartitions: -10, + ReplicationFactor: testReplicationFactor, + }, + }, + expected: KafkaChannel{ + Spec: KafkaChannelSpec{ + NumPartitions: utils.DefaultNumPartitions, + ReplicationFactor: testReplicationFactor, + }, + }, + }, + "replicationFactor not set": { + initial: KafkaChannel{ + Spec: KafkaChannelSpec{ + NumPartitions: testNumPartitions, + }, + }, + expected: KafkaChannel{ + Spec: KafkaChannelSpec{ + NumPartitions: testNumPartitions, + ReplicationFactor: utils.DefaultReplicationFactor, + }, + }, + }, + "replicationFactor negative": { + initial: KafkaChannel{ + Spec: KafkaChannelSpec{ + NumPartitions: testNumPartitions, + ReplicationFactor: -10, + }, + }, + expected: KafkaChannel{ + Spec: KafkaChannelSpec{ + NumPartitions: testNumPartitions, + ReplicationFactor: utils.DefaultReplicationFactor, + }, + }, + }, + } + for n, tc := range testCases { + t.Run(n, func(t *testing.T) { + tc.initial.SetDefaults(context.TODO()) + if diff := cmp.Diff(tc.expected, tc.initial); diff != "" { + t.Fatalf("Unexpected defaults (-want, +got): %s", diff) + } 
+ }) + } +} diff --git a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_types.go b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_types.go index 3e685c9ddfb..57d63b9ba11 100644 --- a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_types.go +++ b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_types.go @@ -52,6 +52,12 @@ var _ webhook.GenericCRD = (*KafkaChannel)(nil) // KafkaChannelSpec defines the specification for a KafkaChannel. type KafkaChannelSpec struct { + // NumPartitions is the number of partitions of a Kafka topic. By default, it is set to 1. + NumPartitions int32 `json:"numPartitions"` + + // ReplicationFactor is the replication factor of a Kafka topic. By default, it is set to 1. + ReplicationFactor int16 `json:"replicationFactor"` + // KafkaChannel conforms to Duck type Subscribable. Subscribable *eventingduck.Subscribable `json:"subscribable,omitempty"` } diff --git a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation.go b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation.go index 015f6db8d3c..c29e10d2f7b 100644 --- a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation.go +++ b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation.go @@ -29,6 +29,16 @@ func (c *KafkaChannel) Validate(ctx context.Context) *apis.FieldError { func (cs *KafkaChannelSpec) Validate(ctx context.Context) *apis.FieldError { var errs *apis.FieldError + if cs.NumPartitions <= 0 { + fe := apis.ErrInvalidValue(cs.NumPartitions, "numPartitions") + errs = errs.Also(fe) + } + + if cs.ReplicationFactor <= 0 { + fe := apis.ErrInvalidValue(cs.ReplicationFactor, "replicationFactor") + errs = errs.Also(fe) + } + if cs.Subscribable != nil { for i, subscriber := range cs.Subscribable.Subscribers { if subscriber.ReplyURI == "" && subscriber.SubscriberURI == "" { diff --git a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation_test.go 
b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation_test.go index 468019cd99b..ba378fb41b4 100644 --- a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation_test.go +++ b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation_test.go @@ -36,13 +36,43 @@ func TestKafkaChannelValidation(t *testing.T) { Spec: KafkaChannelSpec{}, }, want: func() *apis.FieldError { - fe := apis.ErrMissingField("spec.bootstrapServers, spec.consumerMode") + var errs *apis.FieldError + fe := apis.ErrInvalidValue(0, "spec.numPartitions") + errs = errs.Also(fe) + fe = apis.ErrInvalidValue(0, "spec.replicationFactor") + errs = errs.Also(fe) + return errs + }(), + }, + "negative numPartitions": { + cr: &KafkaChannel{ + Spec: KafkaChannelSpec{ + NumPartitions: -10, + ReplicationFactor: 1, + }, + }, + want: func() *apis.FieldError { + fe := apis.ErrInvalidValue(-10, "spec.numPartitions") + return fe + }(), + }, + "negative replicationFactor": { + cr: &KafkaChannel{ + Spec: KafkaChannelSpec{ + NumPartitions: 1, + ReplicationFactor: -10, + }, + }, + want: func() *apis.FieldError { + fe := apis.ErrInvalidValue(-10, "spec.replicationFactor") return fe }(), }, "valid subscribers array": { cr: &KafkaChannel{ Spec: KafkaChannelSpec{ + NumPartitions: 1, + ReplicationFactor: 1, Subscribable: &eventingduck.Subscribable{ Subscribers: []eventingduck.ChannelSubscriberSpec{{ SubscriberURI: "subscriberendpoint", @@ -55,6 +85,8 @@ func TestKafkaChannelValidation(t *testing.T) { "empty subscriber at index 1": { cr: &KafkaChannel{ Spec: KafkaChannelSpec{ + NumPartitions: 1, + ReplicationFactor: 1, Subscribable: &eventingduck.Subscribable{ Subscribers: []eventingduck.ChannelSubscriberSpec{{ SubscriberURI: "subscriberendpoint", @@ -71,6 +103,8 @@ func TestKafkaChannelValidation(t *testing.T) { "two empty subscribers": { cr: &KafkaChannel{ Spec: KafkaChannelSpec{ + NumPartitions: 1, + ReplicationFactor: 1, Subscribable: &eventingduck.Subscribable{ Subscribers: 
[]eventingduck.ChannelSubscriberSpec{{}, {}}, }, diff --git a/contrib/kafka/pkg/controller/channel/reconcile.go b/contrib/kafka/pkg/controller/channel/reconcile.go index ba7e271557d..e1ef6119518 100644 --- a/contrib/kafka/pkg/controller/channel/reconcile.go +++ b/contrib/kafka/pkg/controller/channel/reconcile.go @@ -39,12 +39,6 @@ import ( const ( finalizerName = controllerAgentName - // DefaultNumPartitions defines the default number of partitions - DefaultNumPartitions = 1 - - // DefaultReplicationFactor defines the default number of replications - DefaultReplicationFactor = 1 - // Name of the corev1.Events emitted from the reconciliation process dispatcherReconcileFailed = "DispatcherReconcileFailed" dispatcherUpdateStatusFailed = "DispatcherUpdateStatusFailed" @@ -200,11 +194,11 @@ func (r *reconciler) provisionChannel(channel *eventingv1alpha1.Channel, kafkaCl } if arguments.NumPartitions == 0 { - arguments.NumPartitions = DefaultNumPartitions + arguments.NumPartitions = utils.DefaultNumPartitions } if arguments.ReplicationFactor == 0 { - arguments.ReplicationFactor = DefaultReplicationFactor + arguments.ReplicationFactor = utils.DefaultReplicationFactor } err := kafkaClusterAdmin.CreateTopic(topicName, &sarama.TopicDetail{ diff --git a/contrib/kafka/pkg/utils/util.go b/contrib/kafka/pkg/utils/util.go index 9e5400b2fc4..408132b087a 100644 --- a/contrib/kafka/pkg/utils/util.go +++ b/contrib/kafka/pkg/utils/util.go @@ -32,6 +32,12 @@ const ( ConsumerModePartitionConsumerValue = "partitions" ConsumerModeMultiplexConsumerValue = "multiplex" KafkaChannelSeparator = "." 
+ + // DefaultNumPartitions defines the default number of partitions + DefaultNumPartitions = 1 + + // DefaultReplicationFactor defines the default number of replications + DefaultReplicationFactor = 1 ) type KafkaConfig struct { From 6577838fd0a43ae0fd5ea323cd0716129db307b7 Mon Sep 17 00:00:00 2001 From: Nacho Cano Date: Tue, 21 May 2019 22:20:25 -0700 Subject: [PATCH 19/64] removing unnecessary file --- contrib/kafka/pkg/apis/messaging/register.go | 21 ------------------- .../pkg/apis/messaging/v1alpha1/register.go | 8 ++++--- 2 files changed, 5 insertions(+), 24 deletions(-) delete mode 100644 contrib/kafka/pkg/apis/messaging/register.go diff --git a/contrib/kafka/pkg/apis/messaging/register.go b/contrib/kafka/pkg/apis/messaging/register.go deleted file mode 100644 index 8f678adcd23..00000000000 --- a/contrib/kafka/pkg/apis/messaging/register.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright 2019 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package messaging - -const ( - GroupName = "messaging.knative.dev" -) diff --git a/contrib/kafka/pkg/apis/messaging/v1alpha1/register.go b/contrib/kafka/pkg/apis/messaging/v1alpha1/register.go index de1ed247c6f..ba2eb8d3883 100644 --- a/contrib/kafka/pkg/apis/messaging/v1alpha1/register.go +++ b/contrib/kafka/pkg/apis/messaging/v1alpha1/register.go @@ -17,15 +17,17 @@ limitations under the License. 
package v1alpha1 import ( - "github.com/knative/eventing/pkg/apis/messaging" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" ) +const ( + groupName = "messaging.knative.dev" +) + // SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: messaging.GroupName, Version: "v1alpha1"} +var SchemeGroupVersion = schema.GroupVersion{Group: groupName, Version: "v1alpha1"} // Kind takes an unqualified kind and returns back a Group qualified GroupKind func Kind(kind string) schema.GroupKind { From 80881429a0f35a59006d76fae07b5ae29422fff6 Mon Sep 17 00:00:00 2001 From: Nacho Cano Date: Tue, 21 May 2019 22:27:20 -0700 Subject: [PATCH 20/64] removing clientArgs --- .../kafka/pkg/reconciler/controller/kafkachannel.go | 6 +----- .../pkg/reconciler/controller/resources/client.go | 11 +++-------- 2 files changed, 4 insertions(+), 13 deletions(-) diff --git a/contrib/kafka/pkg/reconciler/controller/kafkachannel.go b/contrib/kafka/pkg/reconciler/controller/kafkachannel.go index 7eda435a4a7..7ed2d492420 100644 --- a/contrib/kafka/pkg/reconciler/controller/kafkachannel.go +++ b/contrib/kafka/pkg/reconciler/controller/kafkachannel.go @@ -383,11 +383,7 @@ func (r *Reconciler) createClient(ctx context.Context, kc *v1alpha1.KafkaChannel kafkaClusterAdmin := r.kafkaClusterAdmin if kafkaClusterAdmin == nil { var err error - args := &resources.ClientArgs{ - ClientID: controllerAgentName, - BootstrapServers: r.kafkaConfig.Brokers, - } - kafkaClusterAdmin, err = resources.MakeClient(args) + kafkaClusterAdmin, err = resources.MakeClient(controllerAgentName, r.kafkaConfig.Brokers) if err != nil { return nil, err } diff --git a/contrib/kafka/pkg/reconciler/controller/resources/client.go b/contrib/kafka/pkg/reconciler/controller/resources/client.go index 40f397d92e2..d2af5852ecd 100644 --- a/contrib/kafka/pkg/reconciler/controller/resources/client.go +++ 
b/contrib/kafka/pkg/reconciler/controller/resources/client.go @@ -20,14 +20,9 @@ import ( "github.com/Shopify/sarama" ) -type ClientArgs struct { - ClientID string - BootstrapServers []string -} - -func MakeClient(args *ClientArgs) (sarama.ClusterAdmin, error) { +func MakeClient(clientID string, bootstrapServers []string) (sarama.ClusterAdmin, error) { saramaConf := sarama.NewConfig() saramaConf.Version = sarama.V1_1_0_0 - saramaConf.ClientID = args.ClientID - return sarama.NewClusterAdmin(args.BootstrapServers, saramaConf) + saramaConf.ClientID = clientID + return sarama.NewClusterAdmin(bootstrapServers, saramaConf) } From 5edb0d9218b4b8fd7cc145475587d86654ada4a9 Mon Sep 17 00:00:00 2001 From: Nacho Cano Date: Tue, 21 May 2019 22:36:59 -0700 Subject: [PATCH 21/64] removing params --- contrib/kafka/config/300-kafka-channel.yaml | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/contrib/kafka/config/300-kafka-channel.yaml b/contrib/kafka/config/300-kafka-channel.yaml index 8fbbae0a57a..0235e124bc9 100644 --- a/contrib/kafka/config/300-kafka-channel.yaml +++ b/contrib/kafka/config/300-kafka-channel.yaml @@ -52,14 +52,6 @@ spec: properties: spec: properties: - bootstrapServers: - type: string - description: "Comma-separated list of the Broker URL of the Kafka cluster, which is in the format of `my-cluster-kafka-bootstrap.my-kafka-namespace:9092`." - minLength: 1 - consumerMode: - type: string - description: "Mode used to dispatch events from different partitions in parallel. `multiplex` and `partitions` are valid values. If not set, `multiplex` is used." - minLength: 1 numPartitions: type: int32 description: "Number of partitions of a Kafka topic." 
@@ -104,7 +96,4 @@ spec: minLength: 1 replyURI: type: string - minLength: 1 - required: - - bootstrapServers - - consumerMode + minLength: 1 \ No newline at end of file From 2556368e3663d2636ba96fbca33bb0b0f945fc3b Mon Sep 17 00:00:00 2001 From: Nacho Cano Date: Tue, 21 May 2019 22:43:26 -0700 Subject: [PATCH 22/64] config-kafka --- contrib/kafka/cmd/channel_dispatcher/main.go | 97 ++++++++++++++++++++ contrib/kafka/config/500-dispatcher.yaml | 10 +- 2 files changed, 102 insertions(+), 5 deletions(-) create mode 100644 contrib/kafka/cmd/channel_dispatcher/main.go diff --git a/contrib/kafka/cmd/channel_dispatcher/main.go b/contrib/kafka/cmd/channel_dispatcher/main.go new file mode 100644 index 00000000000..635dd1b3e93 --- /dev/null +++ b/contrib/kafka/cmd/channel_dispatcher/main.go @@ -0,0 +1,97 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "flag" + "github.com/knative/eventing/contrib/kafka/pkg/utils" + "log" + + "github.com/knative/eventing/contrib/kafka/pkg/controller" + "github.com/knative/eventing/contrib/kafka/pkg/dispatcher" + "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" + "github.com/knative/eventing/pkg/channelwatcher" + "github.com/knative/eventing/pkg/tracing" + "github.com/knative/pkg/configmap" + "github.com/knative/pkg/signals" + "github.com/knative/pkg/system" + "go.uber.org/zap" + "k8s.io/client-go/kubernetes" + "sigs.k8s.io/controller-runtime/pkg/client/config" + "sigs.k8s.io/controller-runtime/pkg/manager" +) + +func main() { + flag.Parse() + logger, err := zap.NewProduction() + if err != nil { + log.Fatalf("Unable to create logger: %v", err) + } + kafkaConfig, err := utils.GetKafkaConfig("/etc/config-kafka") + if err != nil { + logger.Fatal("Unable to load provisioner config", zap.Error(err)) + } + + mgr, err := manager.New(config.GetConfigOrDie(), manager.Options{}) + if err != nil { + logger.Fatal("Unable to create manager.", zap.Error(err)) + } + + kafkaDispatcher, err := dispatcher.NewDispatcher(kafkaConfig.Brokers, kafkaConfig.ConsumerMode, logger) + if err != nil { + logger.Fatal("Unable to create kafka dispatcher", zap.Error(err)) + } + if err = mgr.Add(kafkaDispatcher); err != nil { + logger.Fatal("Unable to add kafkaDispatcher", zap.Error(err)) + } + + if err := v1alpha1.AddToScheme(mgr.GetScheme()); err != nil { + logger.Fatal("Unable to add scheme for eventing apis.", zap.Error(err)) + } + + // Zipkin tracing. 
+ kc := kubernetes.NewForConfigOrDie(mgr.GetConfig()) + configMapWatcher := configmap.NewInformedWatcher(kc, system.Namespace()) + if err = tracing.SetupDynamicZipkinPublishing(logger.Sugar(), configMapWatcher, "kafka-dispatcher"); err != nil { + logger.Fatal("Error setting up Zipkin publishing", zap.Error(err)) + } + + if err = channelwatcher.New(mgr, logger, channelwatcher.UpdateConfigWatchHandler(kafkaDispatcher.UpdateConfig, shouldWatch)); err != nil { + logger.Fatal("Unable to create channel watcher.", zap.Error(err)) + } + + // set up signals so we handle the first shutdown signal gracefully + stopCh := signals.SetupSignalHandler() + + // configMapWatcher does not block, so start it first. + if err = configMapWatcher.Start(stopCh); err != nil { + logger.Fatal("Failed to start ConfigMap watcher", zap.Error(err)) + } + + // Start blocks forever. + err = mgr.Start(stopCh) + if err != nil { + logger.Fatal("Manager.Start() returned an error", zap.Error(err)) + } + logger.Info("Exiting...") +} + +func shouldWatch(ch *v1alpha1.Channel) bool { + return ch.Spec.Provisioner != nil && + ch.Spec.Provisioner.Namespace == "" && + ch.Spec.Provisioner.Name == controller.Name +} diff --git a/contrib/kafka/config/500-dispatcher.yaml b/contrib/kafka/config/500-dispatcher.yaml index de65df71eea..8ca227062d7 100644 --- a/contrib/kafka/config/500-dispatcher.yaml +++ b/contrib/kafka/config/500-dispatcher.yaml @@ -30,16 +30,16 @@ spec: serviceAccountName: kafka-dispatcher containers: - name: dispatcher - image: github.com/knative/eventing/contrib/kafka/cmd/dispatcher + image: github.com/knative/eventing/contrib/kafka/cmd/channel_dispatcher env: - name: SYSTEM_NAMESPACE valueFrom: fieldRef: fieldPath: metadata.namespace volumeMounts: - - name: kafka-config - mountPath: /etc/config-provisioner + - name: config-kafka + mountPath: /etc/config-kafka volumes: - - name: kafka-config + - name: config-kafka configMap: - name: kafka-config + name: config-kafka From 
affe68140d9372e836d80fed83acba7fe4672339 Mon Sep 17 00:00:00 2001 From: Nacho Cano Date: Tue, 21 May 2019 22:53:18 -0700 Subject: [PATCH 23/64] patching --- contrib/kafka/config/200-controller-clusterrole.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/contrib/kafka/config/200-controller-clusterrole.yaml b/contrib/kafka/config/200-controller-clusterrole.yaml index a4221f2da9d..093ad2b73cd 100644 --- a/contrib/kafka/config/200-controller-clusterrole.yaml +++ b/contrib/kafka/config/200-controller-clusterrole.yaml @@ -27,6 +27,7 @@ rules: - list - watch - update + - patch - apiGroups: - "" # Core API group. resources: From 23e8e0123e6db10cea0bfc8921867b0ce5223b35 Mon Sep 17 00:00:00 2001 From: Nacho Cano Date: Tue, 21 May 2019 23:14:24 -0700 Subject: [PATCH 24/64] updates --- .../v1alpha1/kafka_channel_defaults.go | 4 +-- .../v1alpha1/kafka_channel_defaults_test.go | 28 ------------------- .../messaging/v1alpha1/kafka_channel_types.go | 4 +-- .../pkg/reconciler/controller/kafkachannel.go | 4 +-- 4 files changed, 6 insertions(+), 34 deletions(-) diff --git a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_defaults.go b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_defaults.go index 6d6a025d6a9..e563a7cf759 100644 --- a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_defaults.go +++ b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_defaults.go @@ -26,10 +26,10 @@ func (c *KafkaChannel) SetDefaults(ctx context.Context) { } func (cs *KafkaChannelSpec) SetDefaults(ctx context.Context) { - if cs.NumPartitions <= 0 { + if cs.NumPartitions == 0 { cs.NumPartitions = utils.DefaultNumPartitions } - if cs.ReplicationFactor <= 0 { + if cs.ReplicationFactor == 0 { cs.ReplicationFactor = utils.DefaultReplicationFactor } } diff --git a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_defaults_test.go b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_defaults_test.go index 5f2846aae0d..b22e88c9dae 100644 --- 
a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_defaults_test.go +++ b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_defaults_test.go @@ -53,20 +53,6 @@ func TestKafkaChannelDefaults(t *testing.T) { }, }, }, - "numPartitions negative": { - initial: KafkaChannel{ - Spec: KafkaChannelSpec{ - NumPartitions: -10, - ReplicationFactor: testReplicationFactor, - }, - }, - expected: KafkaChannel{ - Spec: KafkaChannelSpec{ - NumPartitions: utils.DefaultNumPartitions, - ReplicationFactor: testReplicationFactor, - }, - }, - }, "replicationFactor not set": { initial: KafkaChannel{ Spec: KafkaChannelSpec{ @@ -80,20 +66,6 @@ func TestKafkaChannelDefaults(t *testing.T) { }, }, }, - "replicationFactor negative": { - initial: KafkaChannel{ - Spec: KafkaChannelSpec{ - NumPartitions: testNumPartitions, - ReplicationFactor: -10, - }, - }, - expected: KafkaChannel{ - Spec: KafkaChannelSpec{ - NumPartitions: testNumPartitions, - ReplicationFactor: utils.DefaultReplicationFactor, - }, - }, - }, } for n, tc := range testCases { t.Run(n, func(t *testing.T) { diff --git a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_types.go b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_types.go index 57d63b9ba11..ac4d61fae4a 100644 --- a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_types.go +++ b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_types.go @@ -53,10 +53,10 @@ var _ webhook.GenericCRD = (*KafkaChannel)(nil) // KafkaChannelSpec defines the specification for a KafkaChannel. type KafkaChannelSpec struct { // NumPartitions is the number of partitions of a Kafka topic. By default, it is set to 1. - NumPartitions int32 `json:"numPartitions"` + NumPartitions int `json:"numPartitions"` // ReplicationFactor is the replication factor of a Kafka topic. By default, it is set to 1. - ReplicationFactor int16 `json:"replicationFactor"` + ReplicationFactor int `json:"replicationFactor"` // KafkaChannel conforms to Duck type Subscribable. 
Subscribable *eventingduck.Subscribable `json:"subscribable,omitempty"` diff --git a/contrib/kafka/pkg/reconciler/controller/kafkachannel.go b/contrib/kafka/pkg/reconciler/controller/kafkachannel.go index 7ed2d492420..ceb3251b898 100644 --- a/contrib/kafka/pkg/reconciler/controller/kafkachannel.go +++ b/contrib/kafka/pkg/reconciler/controller/kafkachannel.go @@ -397,8 +397,8 @@ func (r *Reconciler) createTopic(ctx context.Context, channel *v1alpha1.KafkaCha topicName := resources.MakeTopicName(channel) logger.Info("Creating topic on Kafka cluster", zap.String("topic", topicName)) err := kafkaClusterAdmin.CreateTopic(topicName, &sarama.TopicDetail{ - ReplicationFactor: channel.Spec.ReplicationFactor, - NumPartitions: channel.Spec.NumPartitions, + ReplicationFactor: int16(channel.Spec.ReplicationFactor), + NumPartitions: int32(channel.Spec.NumPartitions), }, false) if err == sarama.ErrTopicAlreadyExists { return nil From 5340871213c319b46509799d4f67ebd28aa6e5db Mon Sep 17 00:00:00 2001 From: Nacho Cano Date: Tue, 21 May 2019 23:23:16 -0700 Subject: [PATCH 25/64] to int --- contrib/kafka/config/300-kafka-channel.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/contrib/kafka/config/300-kafka-channel.yaml b/contrib/kafka/config/300-kafka-channel.yaml index 0235e124bc9..b568134ff7a 100644 --- a/contrib/kafka/config/300-kafka-channel.yaml +++ b/contrib/kafka/config/300-kafka-channel.yaml @@ -53,10 +53,10 @@ spec: spec: properties: numPartitions: - type: int32 + type: int description: "Number of partitions of a Kafka topic." replicationFactor: - type: int16 + type: int description: "Replication factor of a Kafka topic." 
subscribable: type: object From aaa21ecb3a65addadda6c55f4e51b606fc6a8ab3 Mon Sep 17 00:00:00 2001 From: Nacho Cano Date: Tue, 21 May 2019 23:48:42 -0700 Subject: [PATCH 26/64] still errors --- contrib/kafka/config/300-kafka-channel.yaml | 6 ++++-- .../pkg/apis/messaging/v1alpha1/kafka_channel_types.go | 4 ++-- contrib/kafka/pkg/reconciler/controller/kafkachannel.go | 5 +++-- 3 files changed, 9 insertions(+), 6 deletions(-) diff --git a/contrib/kafka/config/300-kafka-channel.yaml b/contrib/kafka/config/300-kafka-channel.yaml index b568134ff7a..4146df0c1f9 100644 --- a/contrib/kafka/config/300-kafka-channel.yaml +++ b/contrib/kafka/config/300-kafka-channel.yaml @@ -53,10 +53,12 @@ spec: spec: properties: numPartitions: - type: int + format: int32 + type: integer description: "Number of partitions of a Kafka topic." replicationFactor: - type: int + format: int16 + type: integer description: "Replication factor of a Kafka topic." subscribable: type: object diff --git a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_types.go b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_types.go index ac4d61fae4a..57d63b9ba11 100644 --- a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_types.go +++ b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_types.go @@ -53,10 +53,10 @@ var _ webhook.GenericCRD = (*KafkaChannel)(nil) // KafkaChannelSpec defines the specification for a KafkaChannel. type KafkaChannelSpec struct { // NumPartitions is the number of partitions of a Kafka topic. By default, it is set to 1. - NumPartitions int `json:"numPartitions"` + NumPartitions int32 `json:"numPartitions"` // ReplicationFactor is the replication factor of a Kafka topic. By default, it is set to 1. - ReplicationFactor int `json:"replicationFactor"` + ReplicationFactor int16 `json:"replicationFactor"` // KafkaChannel conforms to Duck type Subscribable. 
Subscribable *eventingduck.Subscribable `json:"subscribable,omitempty"` diff --git a/contrib/kafka/pkg/reconciler/controller/kafkachannel.go b/contrib/kafka/pkg/reconciler/controller/kafkachannel.go index ceb3251b898..3c3b875d6f0 100644 --- a/contrib/kafka/pkg/reconciler/controller/kafkachannel.go +++ b/contrib/kafka/pkg/reconciler/controller/kafkachannel.go @@ -246,6 +246,7 @@ func (r *Reconciler) reconcile(ctx context.Context, kc *v1alpha1.KafkaChannel) e kc.Status.MarkTopicFailed("TopicCreateFailed", "error while creating topic: %s", err) return err } + kc.Status.MarkTopicTrue() // Get the Dispatcher Deployment and propagate the status to the Channel d, err := r.deploymentLister.Deployments(r.dispatcherNamespace).Get(r.dispatcherDeploymentName) @@ -397,8 +398,8 @@ func (r *Reconciler) createTopic(ctx context.Context, channel *v1alpha1.KafkaCha topicName := resources.MakeTopicName(channel) logger.Info("Creating topic on Kafka cluster", zap.String("topic", topicName)) err := kafkaClusterAdmin.CreateTopic(topicName, &sarama.TopicDetail{ - ReplicationFactor: int16(channel.Spec.ReplicationFactor), - NumPartitions: int32(channel.Spec.NumPartitions), + ReplicationFactor: channel.Spec.ReplicationFactor, + NumPartitions: channel.Spec.NumPartitions, }, false) if err == sarama.ErrTopicAlreadyExists { return nil From 9bf97ab1f112963a2c78abd23aab44bd11f48ff2 Mon Sep 17 00:00:00 2001 From: Nacho Cano Date: Wed, 22 May 2019 00:06:24 -0700 Subject: [PATCH 27/64] changing dispatcher name --- contrib/kafka/cmd/channel_controller/main.go | 4 +++- contrib/kafka/config/200-dispatcher-service.yaml | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/contrib/kafka/cmd/channel_controller/main.go b/contrib/kafka/cmd/channel_controller/main.go index 5a7feed5862..870d0daee55 100644 --- a/contrib/kafka/cmd/channel_controller/main.go +++ b/contrib/kafka/cmd/channel_controller/main.go @@ -41,7 +41,9 @@ import ( const ( dispatcherDeploymentName = "kafka-dispatcher" - 
dispatcherServiceName = "kafka-dispatcher" + // The dispatcher service name used with the provisioner model is kafka-dispatcher, thus we change + // it to be kafka-channel-dispatcher for CRDs. + dispatcherServiceName = "kafka-channel-dispatcher" ) var ( diff --git a/contrib/kafka/config/200-dispatcher-service.yaml b/contrib/kafka/config/200-dispatcher-service.yaml index 31172e49d65..14e8775b6bf 100644 --- a/contrib/kafka/config/200-dispatcher-service.yaml +++ b/contrib/kafka/config/200-dispatcher-service.yaml @@ -15,7 +15,9 @@ apiVersion: v1 kind: Service metadata: - name: kafka-dispatcher + # The dispatcher service name used with the provisioner model is kafka-dispatcher, thus we change + # it to be kafka-channel-dispatcher for CRDs. + name: kafka-channel-dispatcher namespace: knative-eventing labels: messaging.knative.dev/channel: kafka-channel From 02f812796a33798a9c7e2649c6be25e200ec5c50 Mon Sep 17 00:00:00 2001 From: Nacho Cano Date: Wed, 22 May 2019 00:27:56 -0700 Subject: [PATCH 28/64] properly removing finalizer --- contrib/kafka/pkg/reconciler/controller/kafkachannel.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/contrib/kafka/pkg/reconciler/controller/kafkachannel.go b/contrib/kafka/pkg/reconciler/controller/kafkachannel.go index 3c3b875d6f0..a64928a6a61 100644 --- a/contrib/kafka/pkg/reconciler/controller/kafkachannel.go +++ b/contrib/kafka/pkg/reconciler/controller/kafkachannel.go @@ -226,7 +226,8 @@ func (r *Reconciler) reconcile(ctx context.Context, kc *v1alpha1.KafkaChannel) e return err } removeFinalizer(kc) - return nil + _, err := r.eventingClientSet.MessagingV1alpha1().KafkaChannels(kc.Namespace).Update(kc) + return err } // If we are adding the finalizer for the first time, then ensure that finalizer is persisted From a06fe96ae1239914f63af1d76c230f1d451129e2 Mon Sep 17 00:00:00 2001 From: nachocano Date: Wed, 22 May 2019 10:53:45 -0700 Subject: [PATCH 29/64] Controller part of the Kafka CRD. 
Dispatcher piece is still missing. Based on changes from InMemoryChannel CRD. --- contrib/kafka/cmd/channel_controller/main.go | 188 +++++++ contrib/kafka/cmd/channel_dispatcher/main.go | 98 ++++ contrib/kafka/cmd/controller/main.go | 21 +- contrib/kafka/cmd/dispatcher/main.go | 4 +- .../config/200-controller-clusterrole.yaml | 87 ++++ .../config/200-dispatcher-clusterrole.yaml | 36 ++ .../kafka/config/200-dispatcher-service.yaml | 30 ++ contrib/kafka/config/200-serviceaccount.yaml | 26 + .../kafka/config/201-clusterrolebinding.yaml | 43 ++ contrib/kafka/config/300-kafka-channel.yaml | 101 ++++ contrib/kafka/config/400-kafka-config.yaml | 28 ++ contrib/kafka/config/500-controller.yaml | 52 ++ contrib/kafka/config/500-dispatcher.yaml | 45 ++ .../kafka/pkg/apis/messaging/v1alpha1/doc.go | 20 + .../v1alpha1/kafka_channel_defaults.go | 35 ++ .../v1alpha1/kafka_channel_defaults_test.go | 78 +++ .../v1alpha1/kafka_channel_lifecycle.go | 135 +++++ .../v1alpha1/kafka_channel_lifecycle_test.go | 404 +++++++++++++++ .../messaging/v1alpha1/kafka_channel_types.go | 93 ++++ .../v1alpha1/kafka_channel_validation.go | 52 ++ .../v1alpha1/kafka_channel_validation_test.go | 134 +++++ .../pkg/apis/messaging/v1alpha1/register.go | 55 +++ .../v1alpha1/zz_generated.deepcopy.go | 126 +++++ .../client/clientset/versioned/clientset.go | 98 ++++ .../pkg/client/clientset/versioned/doc.go | 20 + .../versioned/fake/clientset_generated.go | 82 ++++ .../client/clientset/versioned/fake/doc.go | 20 + .../clientset/versioned/fake/register.go | 56 +++ .../client/clientset/versioned/scheme/doc.go | 20 + .../clientset/versioned/scheme/register.go | 56 +++ .../versioned/typed/messaging/v1alpha1/doc.go | 20 + .../typed/messaging/v1alpha1/fake/doc.go | 20 + .../v1alpha1/fake/fake_kafkachannel.go | 140 ++++++ .../v1alpha1/fake/fake_messaging_client.go | 40 ++ .../messaging/v1alpha1/generated_expansion.go | 21 + .../typed/messaging/v1alpha1/kafkachannel.go | 174 +++++++ 
.../messaging/v1alpha1/messaging_client.go | 90 ++++ .../informers/externalversions/factory.go | 180 +++++++ .../informers/externalversions/generic.go | 62 +++ .../internalinterfaces/factory_interfaces.go | 38 ++ .../externalversions/messaging/interface.go | 46 ++ .../messaging/v1alpha1/interface.go | 45 ++ .../messaging/v1alpha1/kafkachannel.go | 89 ++++ .../messaging/v1alpha1/expansion_generated.go | 27 + .../messaging/v1alpha1/kafkachannel.go | 94 ++++ .../kafka/pkg/controller/channel/provider.go | 5 +- .../kafka/pkg/controller/channel/reconcile.go | 17 +- .../pkg/controller/channel/reconcile_test.go | 6 +- contrib/kafka/pkg/controller/provider.go | 5 +- .../kafka/pkg/controller/reconcile_test.go | 5 +- contrib/kafka/pkg/controller/types.go | 8 - contrib/kafka/pkg/controller/util.go | 59 --- contrib/kafka/pkg/dispatcher/dispatcher.go | 5 +- contrib/kafka/pkg/reconciler/kafkachannel.go | 462 ++++++++++++++++++ .../kafka/pkg/reconciler/kafkachannel_test.go | 433 ++++++++++++++++ .../kafka/pkg/reconciler/resources/client.go | 28 ++ .../kafka/pkg/reconciler/resources/service.go | 96 ++++ .../pkg/reconciler/resources/service_test.go | 143 ++++++ .../kafka/pkg/reconciler/resources/topic.go | 30 ++ contrib/kafka/pkg/utils/util.go | 86 ++++ .../pkg/{controller => utils}/util_test.go | 42 +- hack/update-codegen.sh | 13 + 62 files changed, 4666 insertions(+), 106 deletions(-) create mode 100644 contrib/kafka/cmd/channel_controller/main.go create mode 100644 contrib/kafka/cmd/channel_dispatcher/main.go create mode 100644 contrib/kafka/config/200-controller-clusterrole.yaml create mode 100644 contrib/kafka/config/200-dispatcher-clusterrole.yaml create mode 100644 contrib/kafka/config/200-dispatcher-service.yaml create mode 100644 contrib/kafka/config/200-serviceaccount.yaml create mode 100644 contrib/kafka/config/201-clusterrolebinding.yaml create mode 100644 contrib/kafka/config/300-kafka-channel.yaml create mode 100644 contrib/kafka/config/400-kafka-config.yaml create 
mode 100644 contrib/kafka/config/500-controller.yaml create mode 100644 contrib/kafka/config/500-dispatcher.yaml create mode 100644 contrib/kafka/pkg/apis/messaging/v1alpha1/doc.go create mode 100644 contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_defaults.go create mode 100644 contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_defaults_test.go create mode 100644 contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_lifecycle.go create mode 100644 contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_lifecycle_test.go create mode 100644 contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_types.go create mode 100644 contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation.go create mode 100644 contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation_test.go create mode 100644 contrib/kafka/pkg/apis/messaging/v1alpha1/register.go create mode 100644 contrib/kafka/pkg/apis/messaging/v1alpha1/zz_generated.deepcopy.go create mode 100644 contrib/kafka/pkg/client/clientset/versioned/clientset.go create mode 100644 contrib/kafka/pkg/client/clientset/versioned/doc.go create mode 100644 contrib/kafka/pkg/client/clientset/versioned/fake/clientset_generated.go create mode 100644 contrib/kafka/pkg/client/clientset/versioned/fake/doc.go create mode 100644 contrib/kafka/pkg/client/clientset/versioned/fake/register.go create mode 100644 contrib/kafka/pkg/client/clientset/versioned/scheme/doc.go create mode 100644 contrib/kafka/pkg/client/clientset/versioned/scheme/register.go create mode 100644 contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1/doc.go create mode 100644 contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1/fake/doc.go create mode 100644 contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1/fake/fake_kafkachannel.go create mode 100644 contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1/fake/fake_messaging_client.go create mode 100644 
contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1/generated_expansion.go create mode 100644 contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1/kafkachannel.go create mode 100644 contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1/messaging_client.go create mode 100644 contrib/kafka/pkg/client/informers/externalversions/factory.go create mode 100644 contrib/kafka/pkg/client/informers/externalversions/generic.go create mode 100644 contrib/kafka/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go create mode 100644 contrib/kafka/pkg/client/informers/externalversions/messaging/interface.go create mode 100644 contrib/kafka/pkg/client/informers/externalversions/messaging/v1alpha1/interface.go create mode 100644 contrib/kafka/pkg/client/informers/externalversions/messaging/v1alpha1/kafkachannel.go create mode 100644 contrib/kafka/pkg/client/listers/messaging/v1alpha1/expansion_generated.go create mode 100644 contrib/kafka/pkg/client/listers/messaging/v1alpha1/kafkachannel.go delete mode 100644 contrib/kafka/pkg/controller/types.go delete mode 100644 contrib/kafka/pkg/controller/util.go create mode 100644 contrib/kafka/pkg/reconciler/kafkachannel.go create mode 100644 contrib/kafka/pkg/reconciler/kafkachannel_test.go create mode 100644 contrib/kafka/pkg/reconciler/resources/client.go create mode 100644 contrib/kafka/pkg/reconciler/resources/service.go create mode 100644 contrib/kafka/pkg/reconciler/resources/service_test.go create mode 100644 contrib/kafka/pkg/reconciler/resources/topic.go create mode 100644 contrib/kafka/pkg/utils/util.go rename contrib/kafka/pkg/{controller => utils}/util_test.go (72%) diff --git a/contrib/kafka/cmd/channel_controller/main.go b/contrib/kafka/cmd/channel_controller/main.go new file mode 100644 index 00000000000..d6d867f3894 --- /dev/null +++ b/contrib/kafka/cmd/channel_controller/main.go @@ -0,0 +1,188 @@ +/* +Copyright 2019 The Knative Authors + +Licensed 
under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "flag" + "github.com/knative/eventing/contrib/kafka/pkg/utils" + "log" + // Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters). + // _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + + clientset "github.com/knative/eventing/contrib/kafka/pkg/client/clientset/versioned" + informers "github.com/knative/eventing/contrib/kafka/pkg/client/informers/externalversions" + kafkachannel "github.com/knative/eventing/contrib/kafka/pkg/reconciler" + "github.com/knative/eventing/pkg/logconfig" + "github.com/knative/eventing/pkg/logging" + "github.com/knative/eventing/pkg/reconciler" + "github.com/knative/pkg/configmap" + kncontroller "github.com/knative/pkg/controller" + "github.com/knative/pkg/signals" + "github.com/knative/pkg/system" + "go.uber.org/zap" + kubeinformers "k8s.io/client-go/informers" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" +) + +const ( + dispatcherDeploymentName = "kafka-ch-dispatcher" + dispatcherServiceName = "kafka-ch-dispatcher" +) + +var ( + hardcodedLoggingConfig = flag.Bool("hardCodedLoggingConfig", false, "If true, use the hard coded logging config. It is intended to be used only when debugging outside a Kubernetes cluster.") + masterURL = flag.String("master", "", "The address of the Kubernetes API server. Overrides any value in kubeconfig. 
Only required if out-of-cluster.") + kubeconfig = flag.String("kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.") +) + +func main() { + flag.Parse() + logger, atomicLevel := setupLogger() + defer logger.Sync() + + // set up signals so we handle the first shutdown signal gracefully + stopCh := signals.SetupSignalHandler() + + cfg, err := clientcmd.BuildConfigFromFlags(*masterURL, *kubeconfig) + if err != nil { + logger.Fatalw("Error building kubeconfig", zap.Error(err)) + } + + // TODO the underlying config map needs to be watched and the config should be reloaded if there is a change. + kafkaConfig, err := utils.GetKafkaConfig("/etc/config-kafka") + if err != nil { + logger.Fatalw("Error loading kafka config", zap.Error(err)) + } + + logger = logger.With(zap.String("controller/impl", "pkg")) + logger.Info("Starting the Kafka controller") + + systemNS := system.Namespace() + + const numControllers = 1 + cfg.QPS = numControllers * rest.DefaultQPS + cfg.Burst = numControllers * rest.DefaultBurst + opt := reconciler.NewOptionsOrDie(cfg, logger, stopCh) + // Setting up our own eventingClientSet as we need the messaging API introduced with kafka. + eventingClientSet := clientset.NewForConfigOrDie(cfg) + + kubeInformerFactory := kubeinformers.NewSharedInformerFactory(opt.KubeClientSet, opt.ResyncPeriod) + eventingInformerFactory := informers.NewSharedInformerFactory(eventingClientSet, opt.ResyncPeriod) + + // Messaging + kafkaChannelInformer := eventingInformerFactory.Messaging().V1alpha1().KafkaChannels() + + // Kube + serviceInformer := kubeInformerFactory.Core().V1().Services() + endpointsInformer := kubeInformerFactory.Core().V1().Endpoints() + deploymentInformer := kubeInformerFactory.Apps().V1().Deployments() + + // Build all of our controllers, with the clients constructed above. + // Add new controllers to this array. + // You also need to modify numControllers above to match this. 
+ controllers := [...]*kncontroller.Impl{ + kafkachannel.NewController( + opt, + eventingClientSet, + kafkaConfig, + systemNS, + dispatcherDeploymentName, + dispatcherServiceName, + kafkaChannelInformer, + deploymentInformer, + serviceInformer, + endpointsInformer, + ), + } + // This line asserts at compile time that the length of controllers is equal to numControllers. + // It is based on https://go101.org/article/tips.html#assert-at-compile-time, which notes that + // var _ [N-M]int + // asserts at compile time that N >= M, which we can use to establish equality of N and M: + // (N >= M) && (M >= N) => (N == M) + var _ [numControllers - len(controllers)][len(controllers) - numControllers]int + + // Watch the logging config map and dynamically update logging levels. + opt.ConfigMapWatcher.Watch(logconfig.ConfigMapName(), logging.UpdateLevelFromConfigMap(logger, atomicLevel, logconfig.Controller)) + // TODO: Watch the observability config map and dynamically update metrics exporter. + //opt.ConfigMapWatcher.Watch(metrics.ObservabilityConfigName, metrics.UpdateExporterFromConfigMap(component, logger)) + if err := opt.ConfigMapWatcher.Start(stopCh); err != nil { + logger.Fatalw("failed to start configuration manager", zap.Error(err)) + } + + // Start all of the informers and wait for them to sync. + logger.Info("Starting informers.") + if err := kncontroller.StartInformers( + stopCh, + // Messaging + kafkaChannelInformer.Informer(), + + // Kube + serviceInformer.Informer(), + deploymentInformer.Informer(), + endpointsInformer.Informer(), + ); err != nil { + logger.Fatalf("Failed to start informers: %v", err) + } + + logger.Info("Starting controllers.") + kncontroller.StartAll(stopCh, controllers[:]...) +} + +func setupLogger() (*zap.SugaredLogger, zap.AtomicLevel) { + // Set up our logger. 
+ loggingConfigMap := getLoggingConfigOrDie() + loggingConfig, err := logging.NewConfigFromMap(loggingConfigMap) + if err != nil { + log.Fatalf("Error parsing logging configuration: %v", err) + } + return logging.NewLoggerFromConfig(loggingConfig, logconfig.Controller) +} + +func getLoggingConfigOrDie() map[string]string { + if hardcodedLoggingConfig != nil && *hardcodedLoggingConfig { + return map[string]string{ + "loglevel.controller": "info", + "zap-logger-config": ` + { + "level": "info", + "development": false, + "outputPaths": ["stdout"], + "errorOutputPaths": ["stderr"], + "encoding": "json", + "encoderConfig": { + "timeKey": "ts", + "levelKey": "level", + "nameKey": "logger", + "callerKey": "caller", + "messageKey": "msg", + "stacktraceKey": "stacktrace", + "lineEnding": "", + "levelEncoder": "", + "timeEncoder": "iso8601", + "durationEncoder": "", + "callerEncoder": "" + }`, + } + } else { + cm, err := configmap.Load("/etc/config-logging") + if err != nil { + log.Fatalf("Error loading logging configuration: %v", err) + } + return cm + } +} diff --git a/contrib/kafka/cmd/channel_dispatcher/main.go b/contrib/kafka/cmd/channel_dispatcher/main.go new file mode 100644 index 00000000000..995710bca9d --- /dev/null +++ b/contrib/kafka/cmd/channel_dispatcher/main.go @@ -0,0 +1,98 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "flag" + "github.com/knative/eventing/contrib/kafka/pkg/utils" + "log" + + "github.com/knative/eventing/contrib/kafka/pkg/controller" + "github.com/knative/eventing/contrib/kafka/pkg/dispatcher" + "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" + "github.com/knative/eventing/pkg/channelwatcher" + "github.com/knative/eventing/pkg/tracing" + "github.com/knative/pkg/configmap" + "github.com/knative/pkg/signals" + "github.com/knative/pkg/system" + "go.uber.org/zap" + "k8s.io/client-go/kubernetes" + "sigs.k8s.io/controller-runtime/pkg/client/config" + "sigs.k8s.io/controller-runtime/pkg/manager" +) + +// TODO update the dispatcher in follow up PR. +func main() { + flag.Parse() + logger, err := zap.NewProduction() + if err != nil { + log.Fatalf("Unable to create logger: %v", err) + } + kafkaConfig, err := utils.GetKafkaConfig("/etc/config-kafka") + if err != nil { + logger.Fatal("Unable to load provisioner config", zap.Error(err)) + } + + mgr, err := manager.New(config.GetConfigOrDie(), manager.Options{}) + if err != nil { + logger.Fatal("Unable to create manager.", zap.Error(err)) + } + + kafkaDispatcher, err := dispatcher.NewDispatcher(kafkaConfig.Brokers, kafkaConfig.ConsumerMode, logger) + if err != nil { + logger.Fatal("Unable to create kafka dispatcher", zap.Error(err)) + } + if err = mgr.Add(kafkaDispatcher); err != nil { + logger.Fatal("Unable to add the kafka dispatcher", zap.Error(err)) + } + + if err := v1alpha1.AddToScheme(mgr.GetScheme()); err != nil { + logger.Fatal("Unable to add scheme for eventing apis.", zap.Error(err)) + } + + // Zipkin tracing. 
+ kc := kubernetes.NewForConfigOrDie(mgr.GetConfig()) + configMapWatcher := configmap.NewInformedWatcher(kc, system.Namespace()) + if err = tracing.SetupDynamicZipkinPublishing(logger.Sugar(), configMapWatcher, "kafka-ch-dispatcher"); err != nil { + logger.Fatal("Error setting up Zipkin publishing", zap.Error(err)) + } + + if err = channelwatcher.New(mgr, logger, channelwatcher.UpdateConfigWatchHandler(kafkaDispatcher.UpdateConfig, shouldWatch)); err != nil { + logger.Fatal("Unable to create channel watcher.", zap.Error(err)) + } + + // set up signals so we handle the first shutdown signal gracefully + stopCh := signals.SetupSignalHandler() + + // configMapWatcher does not block, so start it first. + if err = configMapWatcher.Start(stopCh); err != nil { + logger.Fatal("Failed to start ConfigMap watcher", zap.Error(err)) + } + + // Start blocks forever. + err = mgr.Start(stopCh) + if err != nil { + logger.Fatal("Manager.Start() returned an error", zap.Error(err)) + } + logger.Info("Exiting...") +} + +func shouldWatch(ch *v1alpha1.Channel) bool { + return ch.Spec.Provisioner != nil && + ch.Spec.Provisioner.Namespace == "" && + ch.Spec.Provisioner.Name == controller.Name +} diff --git a/contrib/kafka/cmd/controller/main.go b/contrib/kafka/cmd/controller/main.go index 96d500834e7..cbf42e3aa30 100644 --- a/contrib/kafka/cmd/controller/main.go +++ b/contrib/kafka/cmd/controller/main.go @@ -1,7 +1,24 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + package main import ( "flag" + "github.com/knative/eventing/contrib/kafka/pkg/utils" "os" // Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters). @@ -24,7 +41,7 @@ import ( type SchemeFunc func(*runtime.Scheme) error // ProvideFunc adds a controller to a Manager. -type ProvideFunc func(mgr manager.Manager, config *provisionerController.KafkaProvisionerConfig, logger *zap.Logger) (controller.Controller, error) +type ProvideFunc func(mgr manager.Manager, config *utils.KafkaConfig, logger *zap.Logger) (controller.Controller, error) func main() { os.Exit(_main()) @@ -60,7 +77,7 @@ func _main() int { } // TODO the underlying config map needs to be watched and the config should be reloaded if there is a change. - provisionerConfig, err := provisionerController.GetProvisionerConfig("/etc/config-provisioner") + provisionerConfig, err := utils.GetKafkaConfig("/etc/config-provisioner") if err != nil { logger.Error(err, "unable to run controller manager") diff --git a/contrib/kafka/cmd/dispatcher/main.go b/contrib/kafka/cmd/dispatcher/main.go index b9d268d48d9..da004665bb7 100644 --- a/contrib/kafka/cmd/dispatcher/main.go +++ b/contrib/kafka/cmd/dispatcher/main.go @@ -18,10 +18,10 @@ package main import ( "flag" + "github.com/knative/eventing/contrib/kafka/pkg/utils" "log" "github.com/knative/eventing/contrib/kafka/pkg/controller" - provisionerController "github.com/knative/eventing/contrib/kafka/pkg/controller" "github.com/knative/eventing/contrib/kafka/pkg/dispatcher" "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" "github.com/knative/eventing/pkg/channelwatcher" @@ -41,7 +41,7 @@ func main() { if err != nil { log.Fatalf("unable to create logger: %v", err) } - provisionerConfig, err := provisionerController.GetProvisionerConfig("/etc/config-provisioner") + provisionerConfig, err := utils.GetKafkaConfig("/etc/config-provisioner") if err != nil { logger.Fatal("unable to load provisioner config", 
zap.Error(err)) } diff --git a/contrib/kafka/config/200-controller-clusterrole.yaml b/contrib/kafka/config/200-controller-clusterrole.yaml new file mode 100644 index 00000000000..ac144b8adff --- /dev/null +++ b/contrib/kafka/config/200-controller-clusterrole.yaml @@ -0,0 +1,87 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: kafka-ch-controller +rules: + - apiGroups: + - messaging.knative.dev + resources: + - kafkachannels + - kafkachannels/status + verbs: + - get + - list + - watch + - update + - patch + - apiGroups: + - "" # Core API group. + resources: + - services + - configmaps + verbs: + - get + - list + - watch + - create + - apiGroups: + - "" # Core API group. + resources: + - services + verbs: + - update + - apiGroups: + - "" # Core API Group. + resources: + - configmaps + resourceNames: + - kafka-dispatcher + verbs: + - update + - apiGroups: + - "" # Core API Group. + resources: + - events + verbs: + - create + - patch + - update + - apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - apiGroups: + - "" # Core API group. 
+ resources: + - endpoints + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - deployments + - deployments/status + verbs: + - get + - list + - watch diff --git a/contrib/kafka/config/200-dispatcher-clusterrole.yaml b/contrib/kafka/config/200-dispatcher-clusterrole.yaml new file mode 100644 index 00000000000..b3766ef80d2 --- /dev/null +++ b/contrib/kafka/config/200-dispatcher-clusterrole.yaml @@ -0,0 +1,36 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: kafka-ch-dispatcher +rules: + - apiGroups: + - messaging.knative.dev + resources: + - kafkachannels + - kafkachannels/status + verbs: + - get + - list + - watch + - apiGroups: + - "" # Core API group. + resources: + - configmaps + verbs: + - get + - list + - watch diff --git a/contrib/kafka/config/200-dispatcher-service.yaml b/contrib/kafka/config/200-dispatcher-service.yaml new file mode 100644 index 00000000000..c204c83d422 --- /dev/null +++ b/contrib/kafka/config/200-dispatcher-service.yaml @@ -0,0 +1,30 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: Service +metadata: + name: kafka-ch-dispatcher + namespace: knative-eventing + labels: + messaging.knative.dev/channel: kafka-channel + messaging.knative.dev/role: dispatcher +spec: + selector: + messaging.knative.dev/channel: kafka-channel + messaging.knative.dev/role: dispatcher + ports: + - port: 80 + protocol: TCP + targetPort: 8080 diff --git a/contrib/kafka/config/200-serviceaccount.yaml b/contrib/kafka/config/200-serviceaccount.yaml new file mode 100644 index 00000000000..8daa2857377 --- /dev/null +++ b/contrib/kafka/config/200-serviceaccount.yaml @@ -0,0 +1,26 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kafka-ch-controller + namespace: knative-eventing + +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kafka-ch-dispatcher + namespace: knative-eventing diff --git a/contrib/kafka/config/201-clusterrolebinding.yaml b/contrib/kafka/config/201-clusterrolebinding.yaml new file mode 100644 index 00000000000..99de053e752 --- /dev/null +++ b/contrib/kafka/config/201-clusterrolebinding.yaml @@ -0,0 +1,43 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: kafka-ch-controller +subjects: + - kind: ServiceAccount + name: kafka-ch-controller + namespace: knative-eventing +roleRef: + kind: ClusterRole + name: kafka-ch-controller + apiGroup: rbac.authorization.k8s.io + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: kafka-ch-dispatcher + namespace: knative-eventing +subjects: + - kind: ServiceAccount + name: kafka-ch-dispatcher + namespace: knative-eventing +roleRef: + kind: ClusterRole + name: kafka-ch-dispatcher + apiGroup: rbac.authorization.k8s.io + diff --git a/contrib/kafka/config/300-kafka-channel.yaml b/contrib/kafka/config/300-kafka-channel.yaml new file mode 100644 index 00000000000..4838ad28833 --- /dev/null +++ b/contrib/kafka/config/300-kafka-channel.yaml @@ -0,0 +1,101 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: kafkachannels.messaging.knative.dev + labels: + knative.dev/crd-install: "true" +spec: + group: messaging.knative.dev + version: v1alpha1 + names: + kind: KafkaChannel + plural: kafkachannels + singular: kafkachannel + categories: + - all + - knative + - messaging + shortNames: + - kc + scope: Namespaced + subresources: + status: {} + additionalPrinterColumns: + - name: Ready + type: string + JSONPath: ".status.conditions[?(@.type==\"Ready\")].status" + - name: Reason + type: string + JSONPath: ".status.conditions[?(@.type==\"Ready\")].reason" + - name: Hostname + type: string + JSONPath: .status.address.hostname + - name: Age + type: date + JSONPath: .metadata.creationTimestamp + validation: + openAPIV3Schema: + properties: + spec: + properties: + numPartitions: + format: int32 + type: integer + description: "Number of partitions of a Kafka topic." + replicationFactor: + format: int16 + type: integer + description: "Replication factor of a Kafka topic." + subscribable: + type: object + properties: + subscribers: + type: array + description: "The list of subscribers that have expressed interest in receiving events from this channel." 
+ items: + required: + - uid + properties: + ref: + type: object + required: + - namespace + - name + - uid + properties: + apiVersion: + type: string + kind: + type: string + name: + type: string + minLength: 1 + namespace: + type: string + minLength: 1 + uid: + type: string + minLength: 1 + uid: + type: string + minLength: 1 + subscriberURI: + type: string + minLength: 1 + replyURI: + type: string + minLength: 1 diff --git a/contrib/kafka/config/400-kafka-config.yaml b/contrib/kafka/config/400-kafka-config.yaml new file mode 100644 index 00000000000..535ff48a812 --- /dev/null +++ b/contrib/kafka/config/400-kafka-config.yaml @@ -0,0 +1,28 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-kafka + namespace: knative-eventing +data: + # Broker URL. Replace this with the URLs for your kafka cluster, + # which is in the format of my-cluster-kafka-bootstrap.my-kafka-namespace:9092. + bootstrap_servers: REPLACE_WITH_CLUSTER_URL + + # Consumer mode to dispatch events from different partitions in parallel. + # By default(multiplex), partitions are multiplexed with a single go channel. + # `multiplex` and `partitions` are valid values. 
+ ## consumer_mode: partitions diff --git a/contrib/kafka/config/500-controller.yaml b/contrib/kafka/config/500-controller.yaml new file mode 100644 index 00000000000..24ab9dc8606 --- /dev/null +++ b/contrib/kafka/config/500-controller.yaml @@ -0,0 +1,52 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kafka-ch-controller + namespace: knative-eventing +spec: + replicas: 1 + selector: + matchLabels: &labels + messaging.knative.dev/channel: kafka-channel + messaging.knative.dev/role: controller + template: + metadata: + labels: *labels + spec: + serviceAccountName: kafka-ch-controller + containers: + - name: controller + image: github.com/knative/eventing/contrib/kafka/cmd/channel_controller + env: + - name: CONFIG_LOGGING_NAME + value: config-logging + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumeMounts: + - name: config-logging + mountPath: /etc/config-logging + - name: config-kafka + mountPath: /etc/config-kafka + volumes: + - name: config-logging + configMap: + name: config-logging + - name: config-kafka + configMap: + name: config-kafka diff --git a/contrib/kafka/config/500-dispatcher.yaml b/contrib/kafka/config/500-dispatcher.yaml new file mode 100644 index 00000000000..a143bb59eba --- /dev/null +++ b/contrib/kafka/config/500-dispatcher.yaml @@ -0,0 +1,45 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the 
Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kafka-ch-dispatcher + namespace: knative-eventing +spec: + replicas: 1 + selector: + matchLabels: &labels + messaging.knative.dev/channel: kafka-channel + messaging.knative.dev/role: dispatcher + template: + metadata: + labels: *labels + spec: + serviceAccountName: kafka-ch-dispatcher + containers: + - name: dispatcher + image: github.com/knative/eventing/contrib/kafka/cmd/channel_dispatcher + env: + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumeMounts: + - name: config-kafka + mountPath: /etc/config-kafka + volumes: + - name: config-kafka + configMap: + name: config-kafka diff --git a/contrib/kafka/pkg/apis/messaging/v1alpha1/doc.go b/contrib/kafka/pkg/apis/messaging/v1alpha1/doc.go new file mode 100644 index 00000000000..64e1d2ec055 --- /dev/null +++ b/contrib/kafka/pkg/apis/messaging/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1alpha1 is the v1alpha1 version of the API. +// +k8s:deepcopy-gen=package +// +groupName=messaging.knative.dev +package v1alpha1 diff --git a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_defaults.go b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_defaults.go new file mode 100644 index 00000000000..e563a7cf759 --- /dev/null +++ b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_defaults.go @@ -0,0 +1,35 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + "context" + "github.com/knative/eventing/contrib/kafka/pkg/utils" +) + +func (c *KafkaChannel) SetDefaults(ctx context.Context) { + c.Spec.SetDefaults(ctx) +} + +func (cs *KafkaChannelSpec) SetDefaults(ctx context.Context) { + if cs.NumPartitions == 0 { + cs.NumPartitions = utils.DefaultNumPartitions + } + if cs.ReplicationFactor == 0 { + cs.ReplicationFactor = utils.DefaultReplicationFactor + } +} diff --git a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_defaults_test.go b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_defaults_test.go new file mode 100644 index 00000000000..b22e88c9dae --- /dev/null +++ b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_defaults_test.go @@ -0,0 +1,78 @@ +/* +Copyright 2019 The Knative Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + "context" + "github.com/knative/eventing/contrib/kafka/pkg/utils" + "testing" + + "github.com/google/go-cmp/cmp" +) + +const ( + testNumPartitions = 10 + testReplicationFactor = 5 +) + +func TestKafkaChannelDefaults(t *testing.T) { + testCases := map[string]struct { + initial KafkaChannel + expected KafkaChannel + }{ + "nil spec": { + initial: KafkaChannel{}, + expected: KafkaChannel{ + Spec: KafkaChannelSpec{ + NumPartitions: utils.DefaultNumPartitions, + ReplicationFactor: utils.DefaultReplicationFactor, + }, + }, + }, + "numPartitions not set": { + initial: KafkaChannel{ + Spec: KafkaChannelSpec{ + ReplicationFactor: testReplicationFactor, + }, + }, + expected: KafkaChannel{ + Spec: KafkaChannelSpec{ + NumPartitions: utils.DefaultNumPartitions, + ReplicationFactor: testReplicationFactor, + }, + }, + }, + "replicationFactor not set": { + initial: KafkaChannel{ + Spec: KafkaChannelSpec{ + NumPartitions: testNumPartitions, + }, + }, + expected: KafkaChannel{ + Spec: KafkaChannelSpec{ + NumPartitions: testNumPartitions, + ReplicationFactor: utils.DefaultReplicationFactor, + }, + }, + }, + } + for n, tc := range testCases { + t.Run(n, func(t *testing.T) { + tc.initial.SetDefaults(context.TODO()) + if diff := cmp.Diff(tc.expected, tc.initial); diff != "" { + t.Fatalf("Unexpected defaults (-want, +got): %s", diff) + } + }) + } +} diff --git a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_lifecycle.go b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_lifecycle.go new file mode 100644 index 00000000000..686544c4a3e --- /dev/null +++ b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_lifecycle.go @@ -0,0 +1,135 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" +) + +var kc = duckv1alpha1.NewLivingConditionSet( + KafkaChannelConditionTopicReady, + KafkaChannelConditionDispatcherReady, + KafkaChannelConditionServiceReady, + KafkaChannelConditionEndpointsReady, + KafkaChannelConditionAddressable, + KafkaChannelConditionChannelServiceReady) + +const ( + // KafkaChannelConditionReady has status True when all subconditions below have been set to True. + KafkaChannelConditionReady = duckv1alpha1.ConditionReady + + // KafkaChannelConditionDispatcherReady has status True when a Dispatcher deployment is ready + // Keyed off appsv1.DeploymentAvailable, which means minimum available replicas required are up + // and running for at least minReadySeconds. + KafkaChannelConditionDispatcherReady duckv1alpha1.ConditionType = "DispatcherReady" + + // KafkaChannelConditionServiceReady has status True when a k8s Service is ready. This + // basically just means it exists because there's no meaningful status in Service. See Endpoints + // below. + KafkaChannelConditionServiceReady duckv1alpha1.ConditionType = "ServiceReady" + + // KafkaChannelConditionEndpointsReady has status True when a k8s Service Endpoints are backed + // by at least one endpoint. + KafkaChannelConditionEndpointsReady duckv1alpha1.ConditionType = "EndpointsReady" + + // KafkaChannelConditionAddressable has status true when this KafkaChannel meets + // the Addressable contract and has a non-empty hostname. 
+	KafkaChannelConditionAddressable duckv1alpha1.ConditionType = "Addressable"
+
+	// KafkaChannelConditionChannelServiceReady has status True when a k8s Service representing the channel is ready.
+	// Because this uses ExternalName, there are no endpoints to check.
+	KafkaChannelConditionChannelServiceReady duckv1alpha1.ConditionType = "ChannelServiceReady"
+
+	// KafkaChannelConditionTopicReady has status True when the Kafka topic to use by the channel exists.
+	KafkaChannelConditionTopicReady duckv1alpha1.ConditionType = "TopicReady"
+)
+
+// GetCondition returns the condition currently associated with the given type, or nil.
+func (cs *KafkaChannelStatus) GetCondition(t duckv1alpha1.ConditionType) *duckv1alpha1.Condition {
+	return kc.Manage(cs).GetCondition(t)
+}
+
+// IsReady returns true if the resource is ready overall.
+func (cs *KafkaChannelStatus) IsReady() bool {
+	return kc.Manage(cs).IsHappy()
+}
+
+// InitializeConditions sets relevant unset conditions to Unknown state.
+func (cs *KafkaChannelStatus) InitializeConditions() {
+	kc.Manage(cs).InitializeConditions()
+}
+
+// TODO: Use the new beta duck types.
+func (cs *KafkaChannelStatus) SetAddress(hostname string) {
+	cs.Address.Hostname = hostname
+	if hostname != "" {
+		kc.Manage(cs).MarkTrue(KafkaChannelConditionAddressable)
+	} else {
+		kc.Manage(cs).MarkFalse(KafkaChannelConditionAddressable, "EmptyHostname", "hostname is the empty string")
+	}
+}
+
+func (cs *KafkaChannelStatus) MarkDispatcherFailed(reason, messageFormat string, messageA ...interface{}) {
+	kc.Manage(cs).MarkFalse(KafkaChannelConditionDispatcherReady, reason, messageFormat, messageA...)
+}
+
+// TODO: Unify this with the ones from Eventing. Say: Broker, Trigger.
+func (cs *KafkaChannelStatus) PropagateDispatcherStatus(ds *appsv1.DeploymentStatus) { + for _, cond := range ds.Conditions { + if cond.Type == appsv1.DeploymentAvailable { + if cond.Status != corev1.ConditionTrue { + cs.MarkDispatcherFailed("DispatcherNotReady", "Dispatcher Deployment is not ready: %s : %s", cond.Reason, cond.Message) + } else { + kc.Manage(cs).MarkTrue(KafkaChannelConditionDispatcherReady) + } + } + } +} + +func (cs *KafkaChannelStatus) MarkServiceFailed(reason, messageFormat string, messageA ...interface{}) { + kc.Manage(cs).MarkFalse(KafkaChannelConditionServiceReady, reason, messageFormat, messageA...) +} + +func (cs *KafkaChannelStatus) MarkServiceTrue() { + kc.Manage(cs).MarkTrue(KafkaChannelConditionServiceReady) +} + +func (cs *KafkaChannelStatus) MarkChannelServiceFailed(reason, messageFormat string, messageA ...interface{}) { + kc.Manage(cs).MarkFalse(KafkaChannelConditionChannelServiceReady, reason, messageFormat, messageA...) +} + +func (cs *KafkaChannelStatus) MarkChannelServiceTrue() { + kc.Manage(cs).MarkTrue(KafkaChannelConditionChannelServiceReady) +} + +func (cs *KafkaChannelStatus) MarkEndpointsFailed(reason, messageFormat string, messageA ...interface{}) { + kc.Manage(cs).MarkFalse(KafkaChannelConditionEndpointsReady, reason, messageFormat, messageA...) +} + +func (cs *KafkaChannelStatus) MarkEndpointsTrue() { + kc.Manage(cs).MarkTrue(KafkaChannelConditionEndpointsReady) +} + +func (cs *KafkaChannelStatus) MarkTopicTrue() { + kc.Manage(cs).MarkTrue(KafkaChannelConditionTopicReady) +} + +func (cs *KafkaChannelStatus) MarkTopicFailed(reason, messageFormat string, messageA ...interface{}) { + kc.Manage(cs).MarkFalse(KafkaChannelConditionTopicReady, reason, messageFormat, messageA...) 
+} diff --git a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_lifecycle_test.go b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_lifecycle_test.go new file mode 100644 index 00000000000..d6a86f6582e --- /dev/null +++ b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_lifecycle_test.go @@ -0,0 +1,404 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" +) + +var condReady = duckv1alpha1.Condition{ + Type: KafkaChannelConditionReady, + Status: corev1.ConditionTrue, +} + +var condDispatcherReady = duckv1alpha1.Condition{ + Type: KafkaChannelConditionDispatcherReady, + Status: corev1.ConditionTrue, +} + +var condDispatcherNotReady = duckv1alpha1.Condition{ + Type: KafkaChannelConditionDispatcherReady, + Status: corev1.ConditionFalse, +} + +var condDispatcherServiceReady = duckv1alpha1.Condition{ + Type: KafkaChannelConditionServiceReady, + Status: corev1.ConditionTrue, +} + +var condDispatcherEndpointsReady = duckv1alpha1.Condition{ + Type: KafkaChannelConditionEndpointsReady, + Status: corev1.ConditionTrue, +} + +var condTopicReady = duckv1alpha1.Condition{ + Type: KafkaChannelConditionTopicReady, + Status: corev1.ConditionTrue, +} + +var condDispatcherAddressable = duckv1alpha1.Condition{ 
+ Type: KafkaChannelConditionAddressable, + Status: corev1.ConditionTrue, +} + +var deploymentConditionReady = appsv1.DeploymentCondition{ + Type: appsv1.DeploymentAvailable, + Status: corev1.ConditionTrue, +} + +var deploymentConditionNotReady = appsv1.DeploymentCondition{ + Type: appsv1.DeploymentAvailable, + Status: corev1.ConditionFalse, +} + +var deploymentStatusReady = &appsv1.DeploymentStatus{Conditions: []appsv1.DeploymentCondition{deploymentConditionReady}} +var deploymentStatusNotReady = &appsv1.DeploymentStatus{Conditions: []appsv1.DeploymentCondition{deploymentConditionNotReady}} + +var ignoreAllButTypeAndStatus = cmpopts.IgnoreFields( + duckv1alpha1.Condition{}, + "LastTransitionTime", "Message", "Reason", "Severity") + +var ignoreLastTransitionTime = cmpopts.IgnoreFields(duckv1alpha1.Condition{}, "LastTransitionTime") + +func TestChannelGetCondition(t *testing.T) { + tests := []struct { + name string + cs *KafkaChannelStatus + condQuery duckv1alpha1.ConditionType + want *duckv1alpha1.Condition + }{{ + name: "single condition", + cs: &KafkaChannelStatus{ + Status: duckv1alpha1.Status{ + Conditions: []duckv1alpha1.Condition{ + condReady, + }, + }, + }, + condQuery: duckv1alpha1.ConditionReady, + want: &condReady, + }, { + name: "unknown condition", + cs: &KafkaChannelStatus{ + Status: duckv1alpha1.Status{ + Conditions: []duckv1alpha1.Condition{ + condReady, + condDispatcherNotReady, + }, + }, + }, + condQuery: duckv1alpha1.ConditionType("foo"), + want: nil, + }} + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := test.cs.GetCondition(test.condQuery) + if diff := cmp.Diff(test.want, got); diff != "" { + t.Errorf("unexpected condition (-want, +got) = %v", diff) + } + }) + } +} + +func TestChannelInitializeConditions(t *testing.T) { + tests := []struct { + name string + cs *KafkaChannelStatus + want *KafkaChannelStatus + }{{ + name: "empty", + cs: &KafkaChannelStatus{}, + want: &KafkaChannelStatus{ + Status: duckv1alpha1.Status{ 
+ Conditions: []duckv1alpha1.Condition{{ + Type: KafkaChannelConditionAddressable, + Status: corev1.ConditionUnknown, + }, { + Type: KafkaChannelConditionChannelServiceReady, + Status: corev1.ConditionUnknown, + }, { + Type: KafkaChannelConditionDispatcherReady, + Status: corev1.ConditionUnknown, + }, { + Type: KafkaChannelConditionEndpointsReady, + Status: corev1.ConditionUnknown, + }, { + Type: KafkaChannelConditionReady, + Status: corev1.ConditionUnknown, + }, { + Type: KafkaChannelConditionServiceReady, + Status: corev1.ConditionUnknown, + }, { + Type: KafkaChannelConditionTopicReady, + Status: corev1.ConditionUnknown, + }}, + }, + }, + }, { + name: "one false", + cs: &KafkaChannelStatus{ + Status: duckv1alpha1.Status{ + Conditions: []duckv1alpha1.Condition{{ + Type: KafkaChannelConditionDispatcherReady, + Status: corev1.ConditionFalse, + }}, + }, + }, + want: &KafkaChannelStatus{ + Status: duckv1alpha1.Status{ + Conditions: []duckv1alpha1.Condition{{ + Type: KafkaChannelConditionAddressable, + Status: corev1.ConditionUnknown, + }, { + Type: KafkaChannelConditionChannelServiceReady, + Status: corev1.ConditionUnknown, + }, { + Type: KafkaChannelConditionDispatcherReady, + Status: corev1.ConditionFalse, + }, { + Type: KafkaChannelConditionEndpointsReady, + Status: corev1.ConditionUnknown, + }, { + Type: KafkaChannelConditionReady, + Status: corev1.ConditionUnknown, + }, { + Type: KafkaChannelConditionServiceReady, + Status: corev1.ConditionUnknown, + }, { + Type: KafkaChannelConditionTopicReady, + Status: corev1.ConditionUnknown, + }}, + }, + }, + }, { + name: "one true", + cs: &KafkaChannelStatus{ + Status: duckv1alpha1.Status{ + Conditions: []duckv1alpha1.Condition{{ + Type: KafkaChannelConditionDispatcherReady, + Status: corev1.ConditionTrue, + }}, + }, + }, + want: &KafkaChannelStatus{ + Status: duckv1alpha1.Status{ + Conditions: []duckv1alpha1.Condition{{ + Type: KafkaChannelConditionAddressable, + Status: corev1.ConditionUnknown, + }, { + Type: 
KafkaChannelConditionChannelServiceReady, + Status: corev1.ConditionUnknown, + }, { + Type: KafkaChannelConditionDispatcherReady, + Status: corev1.ConditionTrue, + }, { + Type: KafkaChannelConditionEndpointsReady, + Status: corev1.ConditionUnknown, + }, { + Type: KafkaChannelConditionReady, + Status: corev1.ConditionUnknown, + }, { + Type: KafkaChannelConditionServiceReady, + Status: corev1.ConditionUnknown, + }, { + Type: KafkaChannelConditionTopicReady, + Status: corev1.ConditionUnknown, + }}, + }, + }, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + test.cs.InitializeConditions() + if diff := cmp.Diff(test.want, test.cs, ignoreAllButTypeAndStatus); diff != "" { + t.Errorf("unexpected conditions (-want, +got) = %v", diff) + } + }) + } +} + +func TestChannelIsReady(t *testing.T) { + tests := []struct { + name string + markServiceReady bool + markChannelServiceReady bool + setAddress bool + markEndpointsReady bool + markTopicReady bool + wantReady bool + dispatcherStatus *appsv1.DeploymentStatus + }{{ + name: "all happy", + markServiceReady: true, + markChannelServiceReady: true, + markEndpointsReady: true, + dispatcherStatus: deploymentStatusReady, + setAddress: true, + markTopicReady: true, + wantReady: true, + }, { + name: "service not ready", + markServiceReady: false, + markChannelServiceReady: false, + markEndpointsReady: true, + dispatcherStatus: deploymentStatusReady, + setAddress: true, + markTopicReady: true, + wantReady: false, + }, { + name: "endpoints not ready", + markServiceReady: true, + markChannelServiceReady: false, + markEndpointsReady: false, + dispatcherStatus: deploymentStatusReady, + setAddress: true, + markTopicReady: true, + wantReady: false, + }, { + name: "deployment not ready", + markServiceReady: true, + markEndpointsReady: true, + markChannelServiceReady: false, + dispatcherStatus: deploymentStatusNotReady, + setAddress: true, + markTopicReady: true, + wantReady: false, + }, { + name: "address not set", 
+ markServiceReady: true, + markChannelServiceReady: false, + markEndpointsReady: true, + dispatcherStatus: deploymentStatusReady, + setAddress: false, + markTopicReady: true, + wantReady: false, + }, { + name: "channel service not ready", + markServiceReady: true, + markChannelServiceReady: false, + markEndpointsReady: true, + dispatcherStatus: deploymentStatusReady, + setAddress: true, + markTopicReady: true, + wantReady: false, + }, { + name: "topic not ready", + markServiceReady: true, + markChannelServiceReady: true, + markEndpointsReady: true, + dispatcherStatus: deploymentStatusReady, + setAddress: true, + markTopicReady: false, + wantReady: false, + }} + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + cs := &KafkaChannelStatus{} + cs.InitializeConditions() + if test.markServiceReady { + cs.MarkServiceTrue() + } else { + cs.MarkServiceFailed("NotReadyService", "testing") + } + if test.markChannelServiceReady { + cs.MarkChannelServiceTrue() + } else { + cs.MarkChannelServiceFailed("NotReadyChannelService", "testing") + } + if test.setAddress { + cs.SetAddress("foo.bar") + } + if test.markEndpointsReady { + cs.MarkEndpointsTrue() + } else { + cs.MarkEndpointsFailed("NotReadyEndpoints", "testing") + } + if test.dispatcherStatus != nil { + cs.PropagateDispatcherStatus(test.dispatcherStatus) + } else { + cs.MarkDispatcherFailed("NotReadyDispatcher", "testing") + } + if test.markTopicReady { + cs.MarkTopicTrue() + } else { + cs.MarkTopicFailed("NotReadyTopic", "testing") + } + got := cs.IsReady() + if test.wantReady != got { + t.Errorf("unexpected readiness: want %v, got %v", test.wantReady, got) + } + }) + } +} + +func TestKafkaChannelStatus_SetAddressable(t *testing.T) { + testCases := map[string]struct { + domainInternal string + want *KafkaChannelStatus + }{ + "empty string": { + want: &KafkaChannelStatus{ + Status: duckv1alpha1.Status{ + Conditions: []duckv1alpha1.Condition{ + { + Type: KafkaChannelConditionAddressable, + Status: 
corev1.ConditionFalse, + }, + // Note that Ready is here because when the condition is marked False, duck + // automatically sets Ready to false. + { + Type: KafkaChannelConditionReady, + Status: corev1.ConditionFalse, + }, + }, + }, + }, + }, + "has domain": { + domainInternal: "test-domain", + want: &KafkaChannelStatus{ + Address: duckv1alpha1.Addressable{ + Hostname: "test-domain", + }, + Status: duckv1alpha1.Status{ + Conditions: []duckv1alpha1.Condition{ + { + Type: KafkaChannelConditionAddressable, + Status: corev1.ConditionTrue, + }, + }, + }, + }, + }, + } + for n, tc := range testCases { + t.Run(n, func(t *testing.T) { + cs := &KafkaChannelStatus{} + cs.SetAddress(tc.domainInternal) + if diff := cmp.Diff(tc.want, cs, ignoreAllButTypeAndStatus); diff != "" { + t.Errorf("unexpected conditions (-want, +got) = %v", diff) + } + }) + } +} diff --git a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_types.go b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_types.go new file mode 100644 index 00000000000..d8b6f773118 --- /dev/null +++ b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_types.go @@ -0,0 +1,93 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + eventingduck "github.com/knative/eventing/pkg/apis/duck/v1alpha1" + "github.com/knative/pkg/apis" + duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1" + "github.com/knative/pkg/webhook" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// KafkaChannel is a resource representing a Kafka Channel. +type KafkaChannel struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Spec defines the desired state of the Channel. + Spec KafkaChannelSpec `json:"spec,omitempty"` + + // Status represents the current state of the KafkaChannel. This data may be out of + // date. + // +optional + Status KafkaChannelStatus `json:"status,omitempty"` +} + +// Check that Channel can be validated, can be defaulted, and has immutable fields. +var _ apis.Validatable = (*KafkaChannel)(nil) +var _ apis.Defaultable = (*KafkaChannel)(nil) +var _ runtime.Object = (*KafkaChannel)(nil) +var _ webhook.GenericCRD = (*KafkaChannel)(nil) + +// KafkaChannelSpec defines the specification for a KafkaChannel. +type KafkaChannelSpec struct { + // NumPartitions is the number of partitions of a Kafka topic. By default, it is set to 1. + NumPartitions int32 `json:"numPartitions"` + + // ReplicationFactor is the replication factor of a Kafka topic. By default, it is set to 1. + ReplicationFactor int16 `json:"replicationFactor"` + + // KafkaChannel conforms to Duck type Subscribable. + Subscribable *eventingduck.Subscribable `json:"subscribable,omitempty"` +} + +// KafkaChannelStatus represents the current state of a KafkaChannel. +type KafkaChannelStatus struct { + // inherits duck/v1alpha1 Status, which currently provides: + // * ObservedGeneration - the 'Generation' of the Service that was last processed by the controller. 
+ // * Conditions - the latest available observations of a resource's current state. + duckv1alpha1.Status `json:",inline"` + + // KafkaChannel is Addressable. It currently exposes the endpoint as a + // fully-qualified DNS name which will distribute traffic over the + // provided targets from inside the cluster. + // + // It generally has the form {channel}.{namespace}.svc.{cluster domain name} + Address duckv1alpha1.Addressable `json:"address,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// KafkaChannelList is a collection of KafkaChannels. +type KafkaChannelList struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + Items []KafkaChannel `json:"items"` +} + +// GetGroupVersionKind returns GroupVersionKind for KafkaChannels +func (c *KafkaChannel) GetGroupVersionKind() schema.GroupVersionKind { + return SchemeGroupVersion.WithKind("KafkaChannel") +} diff --git a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation.go b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation.go new file mode 100644 index 00000000000..c29e10d2f7b --- /dev/null +++ b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation.go @@ -0,0 +1,52 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + "context" + "fmt" + "github.com/knative/pkg/apis" +) + +func (c *KafkaChannel) Validate(ctx context.Context) *apis.FieldError { + return c.Spec.Validate(ctx).ViaField("spec") +} + +func (cs *KafkaChannelSpec) Validate(ctx context.Context) *apis.FieldError { + var errs *apis.FieldError + + if cs.NumPartitions <= 0 { + fe := apis.ErrInvalidValue(cs.NumPartitions, "numPartitions") + errs = errs.Also(fe) + } + + if cs.ReplicationFactor <= 0 { + fe := apis.ErrInvalidValue(cs.ReplicationFactor, "replicationFactor") + errs = errs.Also(fe) + } + + if cs.Subscribable != nil { + for i, subscriber := range cs.Subscribable.Subscribers { + if subscriber.ReplyURI == "" && subscriber.SubscriberURI == "" { + fe := apis.ErrMissingField("replyURI", "subscriberURI") + fe.Details = "expected at least one of, got none" + errs = errs.Also(fe.ViaField(fmt.Sprintf("subscriber[%d]", i)).ViaField("subscribable")) + } + } + } + return errs +} diff --git a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation_test.go b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation_test.go new file mode 100644 index 00000000000..ba378fb41b4 --- /dev/null +++ b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation_test.go @@ -0,0 +1,134 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + "context" + "github.com/google/go-cmp/cmp" + "github.com/knative/pkg/webhook" + "testing" + + eventingduck "github.com/knative/eventing/pkg/apis/duck/v1alpha1" + "github.com/knative/pkg/apis" +) + +func TestKafkaChannelValidation(t *testing.T) { + testCases := map[string]struct { + cr webhook.GenericCRD + want *apis.FieldError + }{ + "empty spec": { + cr: &KafkaChannel{ + Spec: KafkaChannelSpec{}, + }, + want: func() *apis.FieldError { + var errs *apis.FieldError + fe := apis.ErrInvalidValue(0, "spec.numPartitions") + errs = errs.Also(fe) + fe = apis.ErrInvalidValue(0, "spec.replicationFactor") + errs = errs.Also(fe) + return errs + }(), + }, + "negative numPartitions": { + cr: &KafkaChannel{ + Spec: KafkaChannelSpec{ + NumPartitions: -10, + ReplicationFactor: 1, + }, + }, + want: func() *apis.FieldError { + fe := apis.ErrInvalidValue(-10, "spec.numPartitions") + return fe + }(), + }, + "negative replicationFactor": { + cr: &KafkaChannel{ + Spec: KafkaChannelSpec{ + NumPartitions: 1, + ReplicationFactor: -10, + }, + }, + want: func() *apis.FieldError { + fe := apis.ErrInvalidValue(-10, "spec.replicationFactor") + return fe + }(), + }, + "valid subscribers array": { + cr: &KafkaChannel{ + Spec: KafkaChannelSpec{ + NumPartitions: 1, + ReplicationFactor: 1, + Subscribable: &eventingduck.Subscribable{ + Subscribers: []eventingduck.ChannelSubscriberSpec{{ + SubscriberURI: "subscriberendpoint", + ReplyURI: "resultendpoint", + }}, + }}, + }, + want: nil, + }, + "empty subscriber at index 1": { + cr: &KafkaChannel{ + Spec: KafkaChannelSpec{ + NumPartitions: 1, + ReplicationFactor: 1, + Subscribable: &eventingduck.Subscribable{ + Subscribers: []eventingduck.ChannelSubscriberSpec{{ + SubscriberURI: "subscriberendpoint", + ReplyURI: "replyendpoint", + }, {}}, + }}, + }, + want: func() *apis.FieldError { + fe := apis.ErrMissingField("spec.subscribable.subscriber[1].replyURI", "spec.subscribable.subscriber[1].subscriberURI") + fe.Details = 
"expected at least one of, got none" + return fe + }(), + }, + "two empty subscribers": { + cr: &KafkaChannel{ + Spec: KafkaChannelSpec{ + NumPartitions: 1, + ReplicationFactor: 1, + Subscribable: &eventingduck.Subscribable{ + Subscribers: []eventingduck.ChannelSubscriberSpec{{}, {}}, + }, + }, + }, + want: func() *apis.FieldError { + var errs *apis.FieldError + fe := apis.ErrMissingField("spec.subscribable.subscriber[0].replyURI", "spec.subscribable.subscriber[0].subscriberURI") + fe.Details = "expected at least one of, got none" + errs = errs.Also(fe) + fe = apis.ErrMissingField("spec.subscribable.subscriber[1].replyURI", "spec.subscribable.subscriber[1].subscriberURI") + fe.Details = "expected at least one of, got none" + errs = errs.Also(fe) + return errs + }(), + }, + } + + for n, test := range testCases { + t.Run(n, func(t *testing.T) { + got := test.cr.Validate(context.Background()) + if diff := cmp.Diff(test.want.Error(), got.Error()); diff != "" { + t.Errorf("%s: validate (-want, +got) = %v", n, diff) + } + }) + } +} diff --git a/contrib/kafka/pkg/apis/messaging/v1alpha1/register.go b/contrib/kafka/pkg/apis/messaging/v1alpha1/register.go new file mode 100644 index 00000000000..ba2eb8d3883 --- /dev/null +++ b/contrib/kafka/pkg/apis/messaging/v1alpha1/register.go @@ -0,0 +1,55 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const ( + groupName = "messaging.knative.dev" +) + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: groupName, Version: "v1alpha1"} + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &KafkaChannel{}, + &KafkaChannelList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/contrib/kafka/pkg/apis/messaging/v1alpha1/zz_generated.deepcopy.go b/contrib/kafka/pkg/apis/messaging/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 00000000000..bc991679170 --- /dev/null +++ b/contrib/kafka/pkg/apis/messaging/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,126 @@ +// +build !ignore_autogenerated + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + duckv1alpha1 "github.com/knative/eventing/pkg/apis/duck/v1alpha1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaChannel) DeepCopyInto(out *KafkaChannel) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaChannel. +func (in *KafkaChannel) DeepCopy() *KafkaChannel { + if in == nil { + return nil + } + out := new(KafkaChannel) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KafkaChannel) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaChannelList) DeepCopyInto(out *KafkaChannelList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KafkaChannel, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaChannelList. +func (in *KafkaChannelList) DeepCopy() *KafkaChannelList { + if in == nil { + return nil + } + out := new(KafkaChannelList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *KafkaChannelList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaChannelSpec) DeepCopyInto(out *KafkaChannelSpec) { + *out = *in + if in.Subscribable != nil { + in, out := &in.Subscribable, &out.Subscribable + *out = new(duckv1alpha1.Subscribable) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaChannelSpec. +func (in *KafkaChannelSpec) DeepCopy() *KafkaChannelSpec { + if in == nil { + return nil + } + out := new(KafkaChannelSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaChannelStatus) DeepCopyInto(out *KafkaChannelStatus) { + *out = *in + in.Status.DeepCopyInto(&out.Status) + in.Address.DeepCopyInto(&out.Address) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaChannelStatus. +func (in *KafkaChannelStatus) DeepCopy() *KafkaChannelStatus { + if in == nil { + return nil + } + out := new(KafkaChannelStatus) + in.DeepCopyInto(out) + return out +} diff --git a/contrib/kafka/pkg/client/clientset/versioned/clientset.go b/contrib/kafka/pkg/client/clientset/versioned/clientset.go new file mode 100644 index 00000000000..bf01dfec9fa --- /dev/null +++ b/contrib/kafka/pkg/client/clientset/versioned/clientset.go @@ -0,0 +1,98 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package versioned + +import ( + messagingv1alpha1 "github.com/knative/eventing/contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1" + discovery "k8s.io/client-go/discovery" + rest "k8s.io/client-go/rest" + flowcontrol "k8s.io/client-go/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + MessagingV1alpha1() messagingv1alpha1.MessagingV1alpha1Interface + // Deprecated: please explicitly pick a version if possible. + Messaging() messagingv1alpha1.MessagingV1alpha1Interface +} + +// Clientset contains the clients for groups. Each group has exactly one +// version included in a Clientset. +type Clientset struct { + *discovery.DiscoveryClient + messagingV1alpha1 *messagingv1alpha1.MessagingV1alpha1Client +} + +// MessagingV1alpha1 retrieves the MessagingV1alpha1Client +func (c *Clientset) MessagingV1alpha1() messagingv1alpha1.MessagingV1alpha1Interface { + return c.messagingV1alpha1 +} + +// Deprecated: Messaging retrieves the default version of MessagingClient. +// Please explicitly pick a version. +func (c *Clientset) Messaging() messagingv1alpha1.MessagingV1alpha1Interface { + return c.messagingV1alpha1 +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + if c == nil { + return nil + } + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. 
+func NewForConfig(c *rest.Config) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + var cs Clientset + var err error + cs.messagingV1alpha1, err = messagingv1alpha1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + return &cs, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *Clientset { + var cs Clientset + cs.messagingV1alpha1 = messagingv1alpha1.NewForConfigOrDie(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) + return &cs +} + +// New creates a new Clientset for the given RESTClient. +func New(c rest.Interface) *Clientset { + var cs Clientset + cs.messagingV1alpha1 = messagingv1alpha1.New(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &cs +} diff --git a/contrib/kafka/pkg/client/clientset/versioned/doc.go b/contrib/kafka/pkg/client/clientset/versioned/doc.go new file mode 100644 index 00000000000..1122e50bfc3 --- /dev/null +++ b/contrib/kafka/pkg/client/clientset/versioned/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated clientset. +package versioned diff --git a/contrib/kafka/pkg/client/clientset/versioned/fake/clientset_generated.go b/contrib/kafka/pkg/client/clientset/versioned/fake/clientset_generated.go new file mode 100644 index 00000000000..2cd9095901a --- /dev/null +++ b/contrib/kafka/pkg/client/clientset/versioned/fake/clientset_generated.go @@ -0,0 +1,82 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + clientset "github.com/knative/eventing/contrib/kafka/pkg/client/clientset/versioned" + messagingv1alpha1 "github.com/knative/eventing/contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1" + fakemessagingv1alpha1 "github.com/knative/eventing/contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1/fake" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/discovery" + fakediscovery "k8s.io/client-go/discovery/fake" + "k8s.io/client-go/testing" +) + +// NewSimpleClientset returns a clientset that will respond with the provided objects. +// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any validations and/or defaults. 
It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. +func NewSimpleClientset(objects ...runtime.Object) *Clientset { + o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + cs := &Clientset{} + cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} + cs.AddReactor("*", "*", testing.ObjectReaction(o)) + cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) + + return cs +} + +// Clientset implements clientset.Interface. Meant to be embedded into a +// struct to get a default implementation. This makes faking out just the method +// you want to test easier. +type Clientset struct { + testing.Fake + discovery *fakediscovery.FakeDiscovery +} + +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return c.discovery +} + +var _ clientset.Interface = &Clientset{} + +// MessagingV1alpha1 retrieves the MessagingV1alpha1Client +func (c *Clientset) MessagingV1alpha1() messagingv1alpha1.MessagingV1alpha1Interface { + return &fakemessagingv1alpha1.FakeMessagingV1alpha1{Fake: &c.Fake} +} + +// Messaging retrieves the MessagingV1alpha1Client +func (c *Clientset) Messaging() messagingv1alpha1.MessagingV1alpha1Interface { + return &fakemessagingv1alpha1.FakeMessagingV1alpha1{Fake: &c.Fake} +} diff --git a/contrib/kafka/pkg/client/clientset/versioned/fake/doc.go b/contrib/kafka/pkg/client/clientset/versioned/fake/doc.go new file mode 100644 index 00000000000..87f3c3e0b01 --- /dev/null +++ b/contrib/kafka/pkg/client/clientset/versioned/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this 
file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated fake clientset. +package fake diff --git a/contrib/kafka/pkg/client/clientset/versioned/fake/register.go b/contrib/kafka/pkg/client/clientset/versioned/fake/register.go new file mode 100644 index 00000000000..d8716c25725 --- /dev/null +++ b/contrib/kafka/pkg/client/clientset/versioned/fake/register.go @@ -0,0 +1,56 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + messagingv1alpha1 "github.com/knative/eventing/contrib/kafka/pkg/apis/messaging/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var scheme = runtime.NewScheme() +var codecs = serializer.NewCodecFactory(scheme) +var parameterCodec = runtime.NewParameterCodec(scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + messagingv1alpha1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(scheme)) +} diff --git a/contrib/kafka/pkg/client/clientset/versioned/scheme/doc.go b/contrib/kafka/pkg/client/clientset/versioned/scheme/doc.go new file mode 100644 index 00000000000..7d76538485b --- /dev/null +++ b/contrib/kafka/pkg/client/clientset/versioned/scheme/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package contains the scheme of the automatically generated clientset. +package scheme diff --git a/contrib/kafka/pkg/client/clientset/versioned/scheme/register.go b/contrib/kafka/pkg/client/clientset/versioned/scheme/register.go new file mode 100644 index 00000000000..655d74d7620 --- /dev/null +++ b/contrib/kafka/pkg/client/clientset/versioned/scheme/register.go @@ -0,0 +1,56 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package scheme + +import ( + messagingv1alpha1 "github.com/knative/eventing/contrib/kafka/pkg/apis/messaging/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var Scheme = runtime.NewScheme() +var Codecs = serializer.NewCodecFactory(Scheme) +var ParameterCodec = runtime.NewParameterCodec(Scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + messagingv1alpha1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(Scheme)) +} diff --git a/contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1/doc.go b/contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1/doc.go new file mode 100644 index 00000000000..a1c6bb9fe8f --- /dev/null +++ b/contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1alpha1 diff --git a/contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1/fake/doc.go b/contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1/fake/doc.go new file mode 100644 index 00000000000..a00e5d7b21a --- /dev/null +++ b/contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. 
+package fake diff --git a/contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1/fake/fake_kafkachannel.go b/contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1/fake/fake_kafkachannel.go new file mode 100644 index 00000000000..4a7fad7f9ae --- /dev/null +++ b/contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1/fake/fake_kafkachannel.go @@ -0,0 +1,140 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1alpha1 "github.com/knative/eventing/contrib/kafka/pkg/apis/messaging/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeKafkaChannels implements KafkaChannelInterface +type FakeKafkaChannels struct { + Fake *FakeMessagingV1alpha1 + ns string +} + +var kafkachannelsResource = schema.GroupVersionResource{Group: "messaging.knative.dev", Version: "v1alpha1", Resource: "kafkachannels"} + +var kafkachannelsKind = schema.GroupVersionKind{Group: "messaging.knative.dev", Version: "v1alpha1", Kind: "KafkaChannel"} + +// Get takes name of the kafkaChannel, and returns the corresponding kafkaChannel object, and an error if there is any. 
+func (c *FakeKafkaChannels) Get(name string, options v1.GetOptions) (result *v1alpha1.KafkaChannel, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(kafkachannelsResource, c.ns, name), &v1alpha1.KafkaChannel{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.KafkaChannel), err +} + +// List takes label and field selectors, and returns the list of KafkaChannels that match those selectors. +func (c *FakeKafkaChannels) List(opts v1.ListOptions) (result *v1alpha1.KafkaChannelList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(kafkachannelsResource, kafkachannelsKind, c.ns, opts), &v1alpha1.KafkaChannelList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.KafkaChannelList{ListMeta: obj.(*v1alpha1.KafkaChannelList).ListMeta} + for _, item := range obj.(*v1alpha1.KafkaChannelList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested kafkaChannels. +func (c *FakeKafkaChannels) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(kafkachannelsResource, c.ns, opts)) + +} + +// Create takes the representation of a kafkaChannel and creates it. Returns the server's representation of the kafkaChannel, and an error, if there is any. +func (c *FakeKafkaChannels) Create(kafkaChannel *v1alpha1.KafkaChannel) (result *v1alpha1.KafkaChannel, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(kafkachannelsResource, c.ns, kafkaChannel), &v1alpha1.KafkaChannel{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.KafkaChannel), err +} + +// Update takes the representation of a kafkaChannel and updates it. 
Returns the server's representation of the kafkaChannel, and an error, if there is any. +func (c *FakeKafkaChannels) Update(kafkaChannel *v1alpha1.KafkaChannel) (result *v1alpha1.KafkaChannel, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(kafkachannelsResource, c.ns, kafkaChannel), &v1alpha1.KafkaChannel{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.KafkaChannel), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeKafkaChannels) UpdateStatus(kafkaChannel *v1alpha1.KafkaChannel) (*v1alpha1.KafkaChannel, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(kafkachannelsResource, "status", c.ns, kafkaChannel), &v1alpha1.KafkaChannel{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.KafkaChannel), err +} + +// Delete takes name of the kafkaChannel and deletes it. Returns an error if one occurs. +func (c *FakeKafkaChannels) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(kafkachannelsResource, c.ns, name), &v1alpha1.KafkaChannel{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeKafkaChannels) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(kafkachannelsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha1.KafkaChannelList{}) + return err +} + +// Patch applies the patch and returns the patched kafkaChannel. +func (c *FakeKafkaChannels) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.KafkaChannel, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(kafkachannelsResource, c.ns, name, data, subresources...), &v1alpha1.KafkaChannel{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.KafkaChannel), err +} diff --git a/contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1/fake/fake_messaging_client.go b/contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1/fake/fake_messaging_client.go new file mode 100644 index 00000000000..220a1e3cb47 --- /dev/null +++ b/contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1/fake/fake_messaging_client.go @@ -0,0 +1,40 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1alpha1 "github.com/knative/eventing/contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeMessagingV1alpha1 struct { + *testing.Fake +} + +func (c *FakeMessagingV1alpha1) KafkaChannels(namespace string) v1alpha1.KafkaChannelInterface { + return &FakeKafkaChannels{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *FakeMessagingV1alpha1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1/generated_expansion.go b/contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1/generated_expansion.go new file mode 100644 index 00000000000..5b2dec5a0f5 --- /dev/null +++ b/contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +type KafkaChannelExpansion interface{} diff --git a/contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1/kafkachannel.go b/contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1/kafkachannel.go new file mode 100644 index 00000000000..d7ad9d39cb7 --- /dev/null +++ b/contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1/kafkachannel.go @@ -0,0 +1,174 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "github.com/knative/eventing/contrib/kafka/pkg/apis/messaging/v1alpha1" + scheme "github.com/knative/eventing/contrib/kafka/pkg/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// KafkaChannelsGetter has a method to return a KafkaChannelInterface. +// A group's client should implement this interface. +type KafkaChannelsGetter interface { + KafkaChannels(namespace string) KafkaChannelInterface +} + +// KafkaChannelInterface has methods to work with KafkaChannel resources. 
+type KafkaChannelInterface interface { + Create(*v1alpha1.KafkaChannel) (*v1alpha1.KafkaChannel, error) + Update(*v1alpha1.KafkaChannel) (*v1alpha1.KafkaChannel, error) + UpdateStatus(*v1alpha1.KafkaChannel) (*v1alpha1.KafkaChannel, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.KafkaChannel, error) + List(opts v1.ListOptions) (*v1alpha1.KafkaChannelList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.KafkaChannel, err error) + KafkaChannelExpansion +} + +// kafkaChannels implements KafkaChannelInterface +type kafkaChannels struct { + client rest.Interface + ns string +} + +// newKafkaChannels returns a KafkaChannels +func newKafkaChannels(c *MessagingV1alpha1Client, namespace string) *kafkaChannels { + return &kafkaChannels{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the kafkaChannel, and returns the corresponding kafkaChannel object, and an error if there is any. +func (c *kafkaChannels) Get(name string, options v1.GetOptions) (result *v1alpha1.KafkaChannel, err error) { + result = &v1alpha1.KafkaChannel{} + err = c.client.Get(). + Namespace(c.ns). + Resource("kafkachannels"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of KafkaChannels that match those selectors. +func (c *kafkaChannels) List(opts v1.ListOptions) (result *v1alpha1.KafkaChannelList, err error) { + result = &v1alpha1.KafkaChannelList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("kafkachannels"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested kafkaChannels. 
+func (c *kafkaChannels) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("kafkachannels"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a kafkaChannel and creates it. Returns the server's representation of the kafkaChannel, and an error, if there is any. +func (c *kafkaChannels) Create(kafkaChannel *v1alpha1.KafkaChannel) (result *v1alpha1.KafkaChannel, err error) { + result = &v1alpha1.KafkaChannel{} + err = c.client.Post(). + Namespace(c.ns). + Resource("kafkachannels"). + Body(kafkaChannel). + Do(). + Into(result) + return +} + +// Update takes the representation of a kafkaChannel and updates it. Returns the server's representation of the kafkaChannel, and an error, if there is any. +func (c *kafkaChannels) Update(kafkaChannel *v1alpha1.KafkaChannel) (result *v1alpha1.KafkaChannel, err error) { + result = &v1alpha1.KafkaChannel{} + err = c.client.Put(). + Namespace(c.ns). + Resource("kafkachannels"). + Name(kafkaChannel.Name). + Body(kafkaChannel). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *kafkaChannels) UpdateStatus(kafkaChannel *v1alpha1.KafkaChannel) (result *v1alpha1.KafkaChannel, err error) { + result = &v1alpha1.KafkaChannel{} + err = c.client.Put(). + Namespace(c.ns). + Resource("kafkachannels"). + Name(kafkaChannel.Name). + SubResource("status"). + Body(kafkaChannel). + Do(). + Into(result) + return +} + +// Delete takes name of the kafkaChannel and deletes it. Returns an error if one occurs. +func (c *kafkaChannels) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("kafkachannels"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. 
+func (c *kafkaChannels) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("kafkachannels"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched kafkaChannel. +func (c *kafkaChannels) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.KafkaChannel, err error) { + result = &v1alpha1.KafkaChannel{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("kafkachannels"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1/messaging_client.go b/contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1/messaging_client.go new file mode 100644 index 00000000000..203bea4573b --- /dev/null +++ b/contrib/kafka/pkg/client/clientset/versioned/typed/messaging/v1alpha1/messaging_client.go @@ -0,0 +1,90 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + v1alpha1 "github.com/knative/eventing/contrib/kafka/pkg/apis/messaging/v1alpha1" + "github.com/knative/eventing/contrib/kafka/pkg/client/clientset/versioned/scheme" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + rest "k8s.io/client-go/rest" +) + +type MessagingV1alpha1Interface interface { + RESTClient() rest.Interface + KafkaChannelsGetter +} + +// MessagingV1alpha1Client is used to interact with features provided by the messaging.knative.dev group. +type MessagingV1alpha1Client struct { + restClient rest.Interface +} + +func (c *MessagingV1alpha1Client) KafkaChannels(namespace string) KafkaChannelInterface { + return newKafkaChannels(c, namespace) +} + +// NewForConfig creates a new MessagingV1alpha1Client for the given config. +func NewForConfig(c *rest.Config) (*MessagingV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &MessagingV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new MessagingV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *MessagingV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new MessagingV1alpha1Client for the given RESTClient. 
+func New(c rest.Interface) *MessagingV1alpha1Client { + return &MessagingV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *MessagingV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/contrib/kafka/pkg/client/informers/externalversions/factory.go b/contrib/kafka/pkg/client/informers/externalversions/factory.go new file mode 100644 index 00000000000..66992469f3f --- /dev/null +++ b/contrib/kafka/pkg/client/informers/externalversions/factory.go @@ -0,0 +1,180 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. 
+ +package externalversions + +import ( + reflect "reflect" + sync "sync" + time "time" + + versioned "github.com/knative/eventing/contrib/kafka/pkg/client/clientset/versioned" + internalinterfaces "github.com/knative/eventing/contrib/kafka/pkg/client/informers/externalversions/internalinterfaces" + messaging "github.com/knative/eventing/contrib/kafka/pkg/client/informers/externalversions/messaging" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// SharedInformerOption defines the functional option type for SharedInformerFactory. +type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory + +type sharedInformerFactory struct { + client versioned.Interface + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc + lock sync.Mutex + defaultResync time.Duration + customResync map[reflect.Type]time.Duration + + informers map[reflect.Type]cache.SharedIndexInformer + // startedInformers is used for tracking which informers have been started. + // This allows Start() to be called multiple times safely. + startedInformers map[reflect.Type]bool +} + +// WithCustomResyncConfig sets a custom resync period for the specified informer types. +func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + for k, v := range resyncConfig { + factory.customResync[reflect.TypeOf(k)] = v + } + return factory + } +} + +// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory. 
+func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.tweakListOptions = tweakListOptions + return factory + } +} + +// WithNamespace limits the SharedInformerFactory to the specified namespace. +func WithNamespace(namespace string) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.namespace = namespace + return factory + } +} + +// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. +func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync) +} + +// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory. +// Listers obtained via this SharedInformerFactory will be subject to the same filters +// as specified here. +// Deprecated: Please use NewSharedInformerFactoryWithOptions instead +func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions)) +} + +// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options. 
+func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory { + factory := &sharedInformerFactory{ + client: client, + namespace: v1.NamespaceAll, + defaultResync: defaultResync, + informers: make(map[reflect.Type]cache.SharedIndexInformer), + startedInformers: make(map[reflect.Type]bool), + customResync: make(map[reflect.Type]time.Duration), + } + + // Apply all options + for _, opt := range options { + factory = opt(factory) + } + + return factory +} + +// Start initializes all requested informers. +func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) { + f.lock.Lock() + defer f.lock.Unlock() + + for informerType, informer := range f.informers { + if !f.startedInformers[informerType] { + go informer.Run(stopCh) + f.startedInformers[informerType] = true + } + } +} + +// WaitForCacheSync waits for all started informers' cache were synced. +func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool { + informers := func() map[reflect.Type]cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informers := map[reflect.Type]cache.SharedIndexInformer{} + for informerType, informer := range f.informers { + if f.startedInformers[informerType] { + informers[informerType] = informer + } + } + return informers + }() + + res := map[reflect.Type]bool{} + for informType, informer := range informers { + res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced) + } + return res +} + +// InternalInformerFor returns the SharedIndexInformer for obj using an internal +// client. 
+func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informerType := reflect.TypeOf(obj) + informer, exists := f.informers[informerType] + if exists { + return informer + } + + resyncPeriod, exists := f.customResync[informerType] + if !exists { + resyncPeriod = f.defaultResync + } + + informer = newFunc(f.client, resyncPeriod) + f.informers[informerType] = informer + + return informer +} + +// SharedInformerFactory provides shared informers for resources in all known +// API group versions. +type SharedInformerFactory interface { + internalinterfaces.SharedInformerFactory + ForResource(resource schema.GroupVersionResource) (GenericInformer, error) + WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool + + Messaging() messaging.Interface +} + +func (f *sharedInformerFactory) Messaging() messaging.Interface { + return messaging.New(f, f.namespace, f.tweakListOptions) +} diff --git a/contrib/kafka/pkg/client/informers/externalversions/generic.go b/contrib/kafka/pkg/client/informers/externalversions/generic.go new file mode 100644 index 00000000000..761bf80c064 --- /dev/null +++ b/contrib/kafka/pkg/client/informers/externalversions/generic.go @@ -0,0 +1,62 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. 
+ +package externalversions + +import ( + "fmt" + + v1alpha1 "github.com/knative/eventing/contrib/kafka/pkg/apis/messaging/v1alpha1" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// GenericInformer is type of SharedIndexInformer which will locate and delegate to other +// sharedInformers based on type +type GenericInformer interface { + Informer() cache.SharedIndexInformer + Lister() cache.GenericLister +} + +type genericInformer struct { + informer cache.SharedIndexInformer + resource schema.GroupResource +} + +// Informer returns the SharedIndexInformer. +func (f *genericInformer) Informer() cache.SharedIndexInformer { + return f.informer +} + +// Lister returns the GenericLister. +func (f *genericInformer) Lister() cache.GenericLister { + return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource) +} + +// ForResource gives generic access to a shared informer of the matching type +// TODO extend this to unknown resources with a client pool +func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { + switch resource { + // Group=messaging.knative.dev, Version=v1alpha1 + case v1alpha1.SchemeGroupVersion.WithResource("kafkachannels"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Messaging().V1alpha1().KafkaChannels().Informer()}, nil + + } + + return nil, fmt.Errorf("no informer found for %v", resource) +} diff --git a/contrib/kafka/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go b/contrib/kafka/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go new file mode 100644 index 00000000000..644293f3f79 --- /dev/null +++ b/contrib/kafka/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -0,0 +1,38 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in 
compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package internalinterfaces + +import ( + time "time" + + versioned "github.com/knative/eventing/contrib/kafka/pkg/client/clientset/versioned" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + cache "k8s.io/client-go/tools/cache" +) + +type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer + +// SharedInformerFactory a small interface to allow for adding an informer without an import cycle +type SharedInformerFactory interface { + Start(stopCh <-chan struct{}) + InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer +} + +type TweakListOptionsFunc func(*v1.ListOptions) diff --git a/contrib/kafka/pkg/client/informers/externalversions/messaging/interface.go b/contrib/kafka/pkg/client/informers/externalversions/messaging/interface.go new file mode 100644 index 00000000000..2a2a4e5ecfc --- /dev/null +++ b/contrib/kafka/pkg/client/informers/externalversions/messaging/interface.go @@ -0,0 +1,46 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package messaging + +import ( + internalinterfaces "github.com/knative/eventing/contrib/kafka/pkg/client/informers/externalversions/internalinterfaces" + v1alpha1 "github.com/knative/eventing/contrib/kafka/pkg/client/informers/externalversions/messaging/v1alpha1" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1alpha1 provides access to shared informers for resources in V1alpha1. + V1alpha1() v1alpha1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1alpha1 returns a new v1alpha1.Interface. +func (g *group) V1alpha1() v1alpha1.Interface { + return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/contrib/kafka/pkg/client/informers/externalversions/messaging/v1alpha1/interface.go b/contrib/kafka/pkg/client/informers/externalversions/messaging/v1alpha1/interface.go new file mode 100644 index 00000000000..9e09a032739 --- /dev/null +++ b/contrib/kafka/pkg/client/informers/externalversions/messaging/v1alpha1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + internalinterfaces "github.com/knative/eventing/contrib/kafka/pkg/client/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // KafkaChannels returns a KafkaChannelInformer. + KafkaChannels() KafkaChannelInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// KafkaChannels returns a KafkaChannelInformer. +func (v *version) KafkaChannels() KafkaChannelInformer { + return &kafkaChannelInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/contrib/kafka/pkg/client/informers/externalversions/messaging/v1alpha1/kafkachannel.go b/contrib/kafka/pkg/client/informers/externalversions/messaging/v1alpha1/kafkachannel.go new file mode 100644 index 00000000000..ddd6ae98f8c --- /dev/null +++ b/contrib/kafka/pkg/client/informers/externalversions/messaging/v1alpha1/kafkachannel.go @@ -0,0 +1,89 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + time "time" + + messagingv1alpha1 "github.com/knative/eventing/contrib/kafka/pkg/apis/messaging/v1alpha1" + versioned "github.com/knative/eventing/contrib/kafka/pkg/client/clientset/versioned" + internalinterfaces "github.com/knative/eventing/contrib/kafka/pkg/client/informers/externalversions/internalinterfaces" + v1alpha1 "github.com/knative/eventing/contrib/kafka/pkg/client/listers/messaging/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// KafkaChannelInformer provides access to a shared informer and lister for +// KafkaChannels. +type KafkaChannelInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.KafkaChannelLister +} + +type kafkaChannelInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewKafkaChannelInformer constructs a new informer for KafkaChannel type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewKafkaChannelInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredKafkaChannelInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredKafkaChannelInformer constructs a new informer for KafkaChannel type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredKafkaChannelInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.MessagingV1alpha1().KafkaChannels(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.MessagingV1alpha1().KafkaChannels(namespace).Watch(options) + }, + }, + &messagingv1alpha1.KafkaChannel{}, + resyncPeriod, + indexers, + ) +} + +func (f *kafkaChannelInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredKafkaChannelInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *kafkaChannelInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&messagingv1alpha1.KafkaChannel{}, f.defaultInformer) +} + +func (f *kafkaChannelInformer) Lister() v1alpha1.KafkaChannelLister { + return v1alpha1.NewKafkaChannelLister(f.Informer().GetIndexer()) +} diff --git a/contrib/kafka/pkg/client/listers/messaging/v1alpha1/expansion_generated.go 
b/contrib/kafka/pkg/client/listers/messaging/v1alpha1/expansion_generated.go new file mode 100644 index 00000000000..d45c47feb0d --- /dev/null +++ b/contrib/kafka/pkg/client/listers/messaging/v1alpha1/expansion_generated.go @@ -0,0 +1,27 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +// KafkaChannelListerExpansion allows custom methods to be added to +// KafkaChannelLister. +type KafkaChannelListerExpansion interface{} + +// KafkaChannelNamespaceListerExpansion allows custom methods to be added to +// KafkaChannelNamespaceLister. +type KafkaChannelNamespaceListerExpansion interface{} diff --git a/contrib/kafka/pkg/client/listers/messaging/v1alpha1/kafkachannel.go b/contrib/kafka/pkg/client/listers/messaging/v1alpha1/kafkachannel.go new file mode 100644 index 00000000000..2401d9dbfe2 --- /dev/null +++ b/contrib/kafka/pkg/client/listers/messaging/v1alpha1/kafkachannel.go @@ -0,0 +1,94 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "github.com/knative/eventing/contrib/kafka/pkg/apis/messaging/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// KafkaChannelLister helps list KafkaChannels. +type KafkaChannelLister interface { + // List lists all KafkaChannels in the indexer. + List(selector labels.Selector) (ret []*v1alpha1.KafkaChannel, err error) + // KafkaChannels returns an object that can list and get KafkaChannels. + KafkaChannels(namespace string) KafkaChannelNamespaceLister + KafkaChannelListerExpansion +} + +// kafkaChannelLister implements the KafkaChannelLister interface. +type kafkaChannelLister struct { + indexer cache.Indexer +} + +// NewKafkaChannelLister returns a new KafkaChannelLister. +func NewKafkaChannelLister(indexer cache.Indexer) KafkaChannelLister { + return &kafkaChannelLister{indexer: indexer} +} + +// List lists all KafkaChannels in the indexer. +func (s *kafkaChannelLister) List(selector labels.Selector) (ret []*v1alpha1.KafkaChannel, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.KafkaChannel)) + }) + return ret, err +} + +// KafkaChannels returns an object that can list and get KafkaChannels. +func (s *kafkaChannelLister) KafkaChannels(namespace string) KafkaChannelNamespaceLister { + return kafkaChannelNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// KafkaChannelNamespaceLister helps list and get KafkaChannels. +type KafkaChannelNamespaceLister interface { + // List lists all KafkaChannels in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1alpha1.KafkaChannel, err error) + // Get retrieves the KafkaChannel from the indexer for a given namespace and name. 
+ Get(name string) (*v1alpha1.KafkaChannel, error) + KafkaChannelNamespaceListerExpansion +} + +// kafkaChannelNamespaceLister implements the KafkaChannelNamespaceLister +// interface. +type kafkaChannelNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all KafkaChannels in the indexer for a given namespace. +func (s kafkaChannelNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.KafkaChannel, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.KafkaChannel)) + }) + return ret, err +} + +// Get retrieves the KafkaChannel from the indexer for a given namespace and name. +func (s kafkaChannelNamespaceLister) Get(name string) (*v1alpha1.KafkaChannel, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("kafkachannel"), name) + } + return obj.(*v1alpha1.KafkaChannel), nil +} diff --git a/contrib/kafka/pkg/controller/channel/provider.go b/contrib/kafka/pkg/controller/channel/provider.go index 73eab2e8d22..b55a956c9a1 100644 --- a/contrib/kafka/pkg/controller/channel/provider.go +++ b/contrib/kafka/pkg/controller/channel/provider.go @@ -30,6 +30,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/source" common "github.com/knative/eventing/contrib/kafka/pkg/controller" + "github.com/knative/eventing/contrib/kafka/pkg/utils" eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" "github.com/knative/pkg/system" ) @@ -51,7 +52,7 @@ type reconciler struct { client client.Client recorder record.EventRecorder logger *zap.Logger - config *common.KafkaProvisionerConfig + config *utils.KafkaConfig // Using a shared kafkaClusterAdmin does not work currently because of an issue with // Shopify/sarama, see https://github.com/Shopify/sarama/issues/1162. 
kafkaClusterAdmin sarama.ClusterAdmin @@ -61,7 +62,7 @@ type reconciler struct { var _ reconcile.Reconciler = &reconciler{} // ProvideController returns a Channel controller. -func ProvideController(mgr manager.Manager, config *common.KafkaProvisionerConfig, logger *zap.Logger) (controller.Controller, error) { +func ProvideController(mgr manager.Manager, config *utils.KafkaConfig, logger *zap.Logger) (controller.Controller, error) { // Setup a new controller to Reconcile Channel. c, err := controller.New(controllerAgentName, mgr, controller.Options{ Reconciler: &reconciler{ diff --git a/contrib/kafka/pkg/controller/channel/reconcile.go b/contrib/kafka/pkg/controller/channel/reconcile.go index 34a7e1c9b71..e1ef6119518 100644 --- a/contrib/kafka/pkg/controller/channel/reconcile.go +++ b/contrib/kafka/pkg/controller/channel/reconcile.go @@ -20,6 +20,7 @@ import ( "context" "encoding/json" "fmt" + "github.com/knative/eventing/contrib/kafka/pkg/utils" "github.com/Shopify/sarama" "go.uber.org/zap" @@ -38,12 +39,6 @@ import ( const ( finalizerName = controllerAgentName - // DefaultNumPartitions defines the default number of partitions - DefaultNumPartitions = 1 - - // DefaultReplicationFactor defines the default number of replications - DefaultReplicationFactor = 1 - // Name of the corev1.Events emitted from the reconciliation process dispatcherReconcileFailed = "DispatcherReconcileFailed" dispatcherUpdateStatusFailed = "DispatcherUpdateStatusFailed" @@ -185,7 +180,7 @@ func (r *reconciler) shouldReconcile(channel *eventingv1alpha1.Channel, clusterC } func (r *reconciler) provisionChannel(channel *eventingv1alpha1.Channel, kafkaClusterAdmin sarama.ClusterAdmin) error { - topicName := topicUtils.TopicName(controller.KafkaChannelSeparator, channel.Namespace, channel.Name) + topicName := topicUtils.TopicName(utils.KafkaChannelSeparator, channel.Namespace, channel.Name) r.logger.Info("creating topic on kafka cluster", zap.String("topic", topicName)) var arguments channelArgs 
@@ -199,11 +194,11 @@ func (r *reconciler) provisionChannel(channel *eventingv1alpha1.Channel, kafkaCl } if arguments.NumPartitions == 0 { - arguments.NumPartitions = DefaultNumPartitions + arguments.NumPartitions = utils.DefaultNumPartitions } if arguments.ReplicationFactor == 0 { - arguments.ReplicationFactor = DefaultReplicationFactor + arguments.ReplicationFactor = utils.DefaultReplicationFactor } err := kafkaClusterAdmin.CreateTopic(topicName, &sarama.TopicDetail{ @@ -221,7 +216,7 @@ func (r *reconciler) provisionChannel(channel *eventingv1alpha1.Channel, kafkaCl } func (r *reconciler) deprovisionChannel(channel *eventingv1alpha1.Channel, kafkaClusterAdmin sarama.ClusterAdmin) error { - topicName := topicUtils.TopicName(controller.KafkaChannelSeparator, channel.Namespace, channel.Name) + topicName := topicUtils.TopicName(utils.KafkaChannelSeparator, channel.Namespace, channel.Name) r.logger.Info("deleting topic on kafka cluster", zap.String("topic", topicName)) err := kafkaClusterAdmin.DeleteTopic(topicName) @@ -246,7 +241,7 @@ func (r *reconciler) getClusterChannelProvisioner() (*eventingv1alpha1.ClusterCh return clusterChannelProvisioner, nil } -func createKafkaAdminClient(config *controller.KafkaProvisionerConfig) (sarama.ClusterAdmin, error) { +func createKafkaAdminClient(config *utils.KafkaConfig) (sarama.ClusterAdmin, error) { saramaConf := sarama.NewConfig() saramaConf.Version = sarama.V1_1_0_0 saramaConf.ClientID = controllerAgentName diff --git a/contrib/kafka/pkg/controller/channel/reconcile_test.go b/contrib/kafka/pkg/controller/channel/reconcile_test.go index 02836e06a54..036f058536b 100644 --- a/contrib/kafka/pkg/controller/channel/reconcile_test.go +++ b/contrib/kafka/pkg/controller/channel/reconcile_test.go @@ -26,7 +26,7 @@ import ( "github.com/Shopify/sarama" "github.com/google/go-cmp/cmp" - "github.com/knative/eventing/contrib/kafka/pkg/controller" + . 
"github.com/knative/eventing/contrib/kafka/pkg/utils" eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" "github.com/knative/eventing/pkg/provisioners" util "github.com/knative/eventing/pkg/provisioners" @@ -531,8 +531,8 @@ func om(namespace, name string) metav1.ObjectMeta { } } -func getControllerConfig() *controller.KafkaProvisionerConfig { - return &controller.KafkaProvisionerConfig{ +func getControllerConfig() *KafkaConfig { + return &KafkaConfig{ Brokers: []string{"test-broker"}, } } diff --git a/contrib/kafka/pkg/controller/provider.go b/contrib/kafka/pkg/controller/provider.go index 0f6ca5631f2..2e0dec6a031 100644 --- a/contrib/kafka/pkg/controller/provider.go +++ b/contrib/kafka/pkg/controller/provider.go @@ -17,6 +17,7 @@ limitations under the License. package controller import ( + "github.com/knative/eventing/contrib/kafka/pkg/utils" "go.uber.org/zap" corev1 "k8s.io/api/core/v1" "k8s.io/client-go/tools/record" @@ -45,14 +46,14 @@ type reconciler struct { client client.Client recorder record.EventRecorder logger *zap.Logger - config *KafkaProvisionerConfig + config *utils.KafkaConfig } // Verify the struct implements reconcile.Reconciler var _ reconcile.Reconciler = &reconciler{} // ProvideController returns a Provisioner controller. -func ProvideController(mgr manager.Manager, config *KafkaProvisionerConfig, logger *zap.Logger) (controller.Controller, error) { +func ProvideController(mgr manager.Manager, config *utils.KafkaConfig, logger *zap.Logger) (controller.Controller, error) { // Setup a new controller to Reconcile Provisioners. 
c, err := controller.New(controllerAgentName, mgr, controller.Options{ Reconciler: &reconciler{ diff --git a/contrib/kafka/pkg/controller/reconcile_test.go b/contrib/kafka/pkg/controller/reconcile_test.go index 6e69b6aa57a..72c3b63fcc4 100644 --- a/contrib/kafka/pkg/controller/reconcile_test.go +++ b/contrib/kafka/pkg/controller/reconcile_test.go @@ -19,6 +19,7 @@ package controller import ( "context" "fmt" + "github.com/knative/eventing/contrib/kafka/pkg/utils" "testing" eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" @@ -195,8 +196,8 @@ func om(namespace, name string) metav1.ObjectMeta { } } -func getControllerConfig() *KafkaProvisionerConfig { - return &KafkaProvisionerConfig{ +func getControllerConfig() *utils.KafkaConfig { + return &utils.KafkaConfig{ Brokers: []string{"test-broker"}, } } diff --git a/contrib/kafka/pkg/controller/types.go b/contrib/kafka/pkg/controller/types.go deleted file mode 100644 index f0384e28402..00000000000 --- a/contrib/kafka/pkg/controller/types.go +++ /dev/null @@ -1,8 +0,0 @@ -package controller - -import cluster "github.com/bsm/sarama-cluster" - -type KafkaProvisionerConfig struct { - Brokers []string - ConsumerMode cluster.ConsumerMode -} diff --git a/contrib/kafka/pkg/controller/util.go b/contrib/kafka/pkg/controller/util.go deleted file mode 100644 index 7b3a56d448e..00000000000 --- a/contrib/kafka/pkg/controller/util.go +++ /dev/null @@ -1,59 +0,0 @@ -package controller - -import ( - "fmt" - "log" - "strings" - - cluster "github.com/bsm/sarama-cluster" - - "github.com/knative/pkg/configmap" -) - -const ( - BrokerConfigMapKey = "bootstrap_servers" - ConsumerModeConfigMapKey = "consumer_mode" - ConsumerModePartitionConsumerValue = "partitions" - ConsumerModeMultiplexConsumerValue = "multiplex" - KafkaChannelSeparator = "." 
-) - -// GetProvisionerConfig returns the details of the associated ClusterChannelProvisioner object -func GetProvisionerConfig(path string) (*KafkaProvisionerConfig, error) { - configMap, err := configmap.Load(path) - if err != nil { - return nil, fmt.Errorf("error loading provisioner configuration: %s", err) - } - - if len(configMap) == 0 { - return nil, fmt.Errorf("missing provisioner configuration") - } - - config := &KafkaProvisionerConfig{} - - if brokers, ok := configMap[BrokerConfigMapKey]; ok { - bootstrapServers := strings.Split(brokers, ",") - for _, s := range bootstrapServers { - if len(s) == 0 { - return nil, fmt.Errorf("empty %s value in provisioner configuration", BrokerConfigMapKey) - } - } - config.Brokers = bootstrapServers - } else { - return nil, fmt.Errorf("missing key %s in provisioner configuration", BrokerConfigMapKey) - } - - config.ConsumerMode = cluster.ConsumerModeMultiplex - if mode, ok := configMap[ConsumerModeConfigMapKey]; ok { - switch strings.ToLower(mode) { - case ConsumerModeMultiplexConsumerValue: - config.ConsumerMode = cluster.ConsumerModeMultiplex - case ConsumerModePartitionConsumerValue: - config.ConsumerMode = cluster.ConsumerModePartitions - default: - log.Printf("consumer_mode: %q is invalid. 
Using default mode %q", mode, ConsumerModeMultiplexConsumerValue) - config.ConsumerMode = cluster.ConsumerModeMultiplex - } - } - return config, nil -} diff --git a/contrib/kafka/pkg/dispatcher/dispatcher.go b/contrib/kafka/pkg/dispatcher/dispatcher.go index 7edaf20ef6d..47cde09d3fe 100644 --- a/contrib/kafka/pkg/dispatcher/dispatcher.go +++ b/contrib/kafka/pkg/dispatcher/dispatcher.go @@ -18,6 +18,7 @@ package dispatcher import ( "errors" "fmt" + "github.com/knative/eventing/contrib/kafka/pkg/utils" "sync" "sync/atomic" @@ -198,7 +199,7 @@ func (d *KafkaDispatcher) Start(stopCh <-chan struct{}) error { func (d *KafkaDispatcher) subscribe(channelRef provisioners.ChannelReference, sub subscription) error { d.logger.Info("Subscribing", zap.Any("channelRef", channelRef), zap.Any("subscription", sub)) - topicName := topicUtils.TopicName(controller.KafkaChannelSeparator, channelRef.Namespace, channelRef.Name) + topicName := topicUtils.TopicName(utils.KafkaChannelSeparator, channelRef.Namespace, channelRef.Name) group := fmt.Sprintf("%s.%s", controller.Name, sub.UID) consumer, err := d.kafkaCluster.NewConsumer(group, []string{topicName}) @@ -362,7 +363,7 @@ func fromKafkaMessage(kafkaMessage *sarama.ConsumerMessage) *provisioners.Messag func toKafkaMessage(channel provisioners.ChannelReference, message *provisioners.Message) *sarama.ProducerMessage { kafkaMessage := sarama.ProducerMessage{ - Topic: topicUtils.TopicName(controller.KafkaChannelSeparator, channel.Namespace, channel.Name), + Topic: topicUtils.TopicName(utils.KafkaChannelSeparator, channel.Namespace, channel.Name), Value: sarama.ByteEncoder(message.Payload), } for h, v := range message.Headers { diff --git a/contrib/kafka/pkg/reconciler/kafkachannel.go b/contrib/kafka/pkg/reconciler/kafkachannel.go new file mode 100644 index 00000000000..d45eef0e0bb --- /dev/null +++ b/contrib/kafka/pkg/reconciler/kafkachannel.go @@ -0,0 +1,462 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, 
Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package reconciler + +import ( + "context" + "encoding/json" + "fmt" + "github.com/knative/eventing/contrib/kafka/pkg/utils" + "github.com/knative/eventing/pkg/reconciler/names" + "reflect" + "time" + + "github.com/Shopify/sarama" + "github.com/knative/eventing/contrib/kafka/pkg/apis/messaging/v1alpha1" + "github.com/knative/eventing/contrib/kafka/pkg/client/clientset/versioned" + messaginginformers "github.com/knative/eventing/contrib/kafka/pkg/client/informers/externalversions/messaging/v1alpha1" + listers "github.com/knative/eventing/contrib/kafka/pkg/client/listers/messaging/v1alpha1" + "github.com/knative/eventing/contrib/kafka/pkg/reconciler/resources" + "github.com/knative/eventing/pkg/logging" + "github.com/knative/eventing/pkg/reconciler" + "github.com/knative/pkg/controller" + "go.uber.org/zap" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + apierrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" + appsv1informers "k8s.io/client-go/informers/apps/v1" + corev1informers "k8s.io/client-go/informers/core/v1" + appsv1listers "k8s.io/client-go/listers/apps/v1" + corev1listers "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/cache" +) + +const ( + // ReconcilerName is the name of the reconciler. 
+ ReconcilerName = "KafkaChannels" + + // controllerAgentName is the string used by this controller to identify + // itself when creating events. + controllerAgentName = "kafka-controller" + + finalizerName = controllerAgentName + + // Name of the corev1.Events emitted from the reconciliation process. + channelReconciled = "ChannelReconciled" + channelReconcileFailed = "ChannelReconcileFailed" + channelUpdateStatusFailed = "ChannelUpdateStatusFailed" +) + +// Reconciler reconciles Kafka Channels. +type Reconciler struct { + *reconciler.Base + + dispatcherNamespace string + dispatcherDeploymentName string + dispatcherServiceName string + + kafkaConfig *utils.KafkaConfig + + // Using a shared kafkaClusterAdmin does not work currently because of an issue with + // Shopify/sarama, see https://github.com/Shopify/sarama/issues/1162. + kafkaClusterAdmin sarama.ClusterAdmin + + eventingClientSet *versioned.Clientset + kafkachannelLister listers.KafkaChannelLister + kafkachannelInformer cache.SharedIndexInformer + deploymentLister appsv1listers.DeploymentLister + serviceLister corev1listers.ServiceLister + endpointsLister corev1listers.EndpointsLister + impl *controller.Impl +} + +var ( + deploymentGVK = appsv1.SchemeGroupVersion.WithKind("Deployment") + serviceGVK = corev1.SchemeGroupVersion.WithKind("Service") +) + +// Check that our Reconciler implements controller.Reconciler. +var _ controller.Reconciler = (*Reconciler)(nil) + +// Check that our Reconciler implements cache.ResourceEventHandler +var _ cache.ResourceEventHandler = (*Reconciler)(nil) + +// NewController initializes the controller and is called by the generated code. +// Registers event handlers to enqueue events. 
+func NewController( + opt reconciler.Options, + eventingClientSet *versioned.Clientset, + kafkaConfig *utils.KafkaConfig, + dispatcherNamespace string, + dispatcherDeploymentName string, + dispatcherServiceName string, + kafkachannelInformer messaginginformers.KafkaChannelInformer, + deploymentInformer appsv1informers.DeploymentInformer, + serviceInformer corev1informers.ServiceInformer, + endpointsInformer corev1informers.EndpointsInformer, +) *controller.Impl { + + r := &Reconciler{ + Base: reconciler.NewBase(opt, controllerAgentName), + dispatcherNamespace: dispatcherNamespace, + dispatcherDeploymentName: dispatcherDeploymentName, + dispatcherServiceName: dispatcherServiceName, + kafkaConfig: kafkaConfig, + eventingClientSet: eventingClientSet, + kafkachannelLister: kafkachannelInformer.Lister(), + kafkachannelInformer: kafkachannelInformer.Informer(), + deploymentLister: deploymentInformer.Lister(), + serviceLister: serviceInformer.Lister(), + endpointsLister: endpointsInformer.Lister(), + } + r.impl = controller.NewImpl(r, r.Logger, ReconcilerName, reconciler.MustNewStatsReporter(ReconcilerName, r.Logger)) + + r.Logger.Info("Setting up event handlers") + kafkachannelInformer.Informer().AddEventHandler(reconciler.Handler(r.impl.Enqueue)) + + // Set up watches for dispatcher resources we care about, since any changes to these + // resources will affect our Channels. So, set up a watch here, that will cause + // a global Resync for all the channels to take stock of their health when these change. 
+ deploymentInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{ + FilterFunc: controller.FilterWithNameAndNamespace(dispatcherNamespace, dispatcherDeploymentName), + Handler: r, + }) + serviceInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{ + FilterFunc: controller.FilterWithNameAndNamespace(dispatcherNamespace, dispatcherServiceName), + Handler: r, + }) + endpointsInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{ + FilterFunc: controller.FilterWithNameAndNamespace(dispatcherNamespace, dispatcherServiceName), + Handler: r, + }) + return r.impl +} + +// cache.ResourceEventHandler implementation. +// These 3 functions just cause a Global Resync of the channels, because any changes here +// should be reflected onto the channels. +func (r *Reconciler) OnAdd(obj interface{}) { + r.impl.GlobalResync(r.kafkachannelInformer) +} + +func (r *Reconciler) OnUpdate(old, new interface{}) { + r.impl.GlobalResync(r.kafkachannelInformer) +} + +func (r *Reconciler) OnDelete(obj interface{}) { + r.impl.GlobalResync(r.kafkachannelInformer) +} + +// Reconcile compares the actual state with the desired, and attempts to +// converge the two. It then updates the Status block of the KafkaChannel resource +// with the current status of the resource. +func (r *Reconciler) Reconcile(ctx context.Context, key string) error { + // Convert the namespace/name string into a distinct namespace and name. + namespace, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + logging.FromContext(ctx).Error("invalid resource key") + return nil + } + + // Get the KafkaChannel resource with this namespace/name. + original, err := r.kafkachannelLister.KafkaChannels(namespace).Get(name) + if apierrs.IsNotFound(err) { + // The resource may no longer exist, in which case we stop processing. 
+ logging.FromContext(ctx).Error("KafkaChannel key in work queue no longer exists")
+ return nil
+ } else if err != nil {
+ return err
+ }
+
+ // Don't modify the informers copy.
+ channel := original.DeepCopy()
+
+ // Reconcile this copy of the KafkaChannel and then write back any status updates regardless of
+ // whether the reconcile errored out.
+ reconcileErr := r.reconcile(ctx, channel)
+ if reconcileErr != nil {
+ logging.FromContext(ctx).Error("Error reconciling KafkaChannel", zap.Error(reconcileErr))
+ r.Recorder.Eventf(channel, corev1.EventTypeWarning, channelReconcileFailed, "KafkaChannel reconciliation failed: %v", reconcileErr)
+ } else {
+ logging.FromContext(ctx).Debug("KafkaChannel reconciled")
+ r.Recorder.Event(channel, corev1.EventTypeNormal, channelReconciled, "KafkaChannel reconciled")
+ }
+
+ if _, updateStatusErr := r.updateStatus(ctx, channel); updateStatusErr != nil {
+ logging.FromContext(ctx).Error("Failed to update KafkaChannel status", zap.Error(updateStatusErr))
+ r.Recorder.Eventf(channel, corev1.EventTypeWarning, channelUpdateStatusFailed, "Failed to update KafkaChannel's status: %v", updateStatusErr)
+ return updateStatusErr
+ }
+
+ // Requeue if the resource is not ready
+ return reconcileErr
+}
+
+func (r *Reconciler) reconcile(ctx context.Context, kc *v1alpha1.KafkaChannel) error {
+ kc.Status.InitializeConditions()
+
+ logger := logging.FromContext(ctx)
+ // Verify channel is valid.
+ if err := kc.Validate(ctx); err != nil {
+ logger.Error("Invalid kafka channel", zap.String("channel", kc.Name), zap.Error(err))
+ return err
+ }
+
+ kafkaClusterAdmin, err := r.createClient(ctx, kc)
+ if err != nil {
+ logger.Error("Unable to build kafka admin client", zap.String("channel", kc.Name), zap.Error(err))
+ return err
+ }
+
+ // See if the channel has been deleted.
+ if kc.DeletionTimestamp != nil {
+ if err := r.deleteTopic(ctx, kc, kafkaClusterAdmin); err != nil {
+ return err
+ }
+ removeFinalizer(kc)
+ _, err := r.eventingClientSet.MessagingV1alpha1().KafkaChannels(kc.Namespace).Update(kc)
+ return err
+ }
+
+ // If we are adding the finalizer for the first time, then ensure that finalizer is persisted
+ // before manipulating Kafka.
+ if err := r.ensureFinalizer(kc); err != nil {
+ return err
+ }
+
+ // We reconcile the status of the Channel by looking at:
+ // 1. Kafka topic used by the channel.
+ // 2. Dispatcher Deployment for its readiness.
+ // 3. Dispatcher k8s Service for its existence.
+ // 4. Dispatcher endpoints to ensure that there's something backing the Service.
+ // 5. K8s service representing the channel that will use ExternalName to point to the Dispatcher k8s service.
+
+ if err := r.createTopic(ctx, kc, kafkaClusterAdmin); err != nil {
+ kc.Status.MarkTopicFailed("TopicCreateFailed", "error while creating topic: %s", err)
+ return err
+ }
+ kc.Status.MarkTopicTrue()
+
+ // Get the Dispatcher Deployment and propagate the status to the Channel
+ d, err := r.deploymentLister.Deployments(r.dispatcherNamespace).Get(r.dispatcherDeploymentName)
+ if err != nil {
+ if apierrs.IsNotFound(err) {
+ kc.Status.MarkDispatcherFailed("DispatcherDeploymentDoesNotExist", "Dispatcher Deployment does not exist")
+ } else {
+ logger.Error("Unable to get the dispatcher Deployment", zap.Error(err))
+ kc.Status.MarkDispatcherFailed("DispatcherDeploymentGetFailed", "Failed to get dispatcher Deployment")
+ }
+ return err
+ }
+ kc.Status.PropagateDispatcherStatus(&d.Status)
+
+ // Get the Dispatcher Service and propagate the status to the Channel in case it does not exist.
+ // We don't do anything with the service because its status contains nothing useful, so just do
+ // an existence check. Then below we check the endpoints targeting it.
+ _, err = r.serviceLister.Services(r.dispatcherNamespace).Get(r.dispatcherServiceName) + if err != nil { + if apierrs.IsNotFound(err) { + kc.Status.MarkServiceFailed("DispatcherServiceDoesNotExist", "Dispatcher Service does not exist") + } else { + logger.Error("Unable to get the dispatcher service", zap.Error(err)) + kc.Status.MarkServiceFailed("DispatcherServiceGetFailed", "Failed to get dispatcher service") + } + return err + } + kc.Status.MarkServiceTrue() + + // Get the Dispatcher Service Endpoints and propagate the status to the Channel + // endpoints has the same name as the service, so not a bug. + e, err := r.endpointsLister.Endpoints(r.dispatcherNamespace).Get(r.dispatcherServiceName) + if err != nil { + if apierrs.IsNotFound(err) { + kc.Status.MarkEndpointsFailed("DispatcherEndpointsDoesNotExist", "Dispatcher Endpoints does not exist") + } else { + logger.Error("Unable to get the dispatcher endpoints", zap.Error(err)) + kc.Status.MarkEndpointsFailed("DispatcherEndpointsGetFailed", "Failed to get dispatcher endpoints") + } + return err + } + + if len(e.Subsets) == 0 { + logger.Error("No endpoints found for Dispatcher service", zap.Error(err)) + kc.Status.MarkEndpointsFailed("DispatcherEndpointsNotReady", "There are no endpoints ready for Dispatcher service") + return fmt.Errorf("there are no endpoints ready for Dispatcher service %s", r.dispatcherServiceName) + } + kc.Status.MarkEndpointsTrue() + + // Reconcile the k8s service representing the actual Channel. 
It points to the Dispatcher service via ExternalName
+ svc, err := r.reconcileChannelService(ctx, kc)
+ if err != nil {
+ kc.Status.MarkChannelServiceFailed("ChannelServiceFailed", fmt.Sprintf("Channel Service failed: %s", err))
+ return err
+ }
+ kc.Status.MarkChannelServiceTrue()
+ kc.Status.SetAddress(names.ServiceHostName(svc.Name, svc.Namespace))
+
+ // close the connection
+ err = kafkaClusterAdmin.Close()
+ if err != nil {
+ logger.Error("Error closing the connection", zap.Error(err))
+ return err
+ }
+
+ // Ok, so now the Dispatcher Deployment & Service have been created, we're golden since the
+ // dispatcher watches the Channel and where it needs to dispatch events to.
+ return nil
+}
+
+func (r *Reconciler) reconcileChannelService(ctx context.Context, channel *v1alpha1.KafkaChannel) (*corev1.Service, error) {
+ logger := logging.FromContext(ctx)
+ // Get the channel's ExternalName Service, creating it if it does not exist.
+ // We don't do anything with the service status because it contains nothing useful, so just do
+ // an existence check followed by an ownership check.
+ // We may change this name later, so we have to ensure we use proper addressable when resolving these.
+ svc, err := r.serviceLister.Services(channel.Namespace).Get(resources.MakeChannelServiceName(channel.Name)) + if err != nil { + if apierrs.IsNotFound(err) { + svc, err = resources.MakeK8sService(channel, resources.ExternalService(r.dispatcherNamespace, r.dispatcherServiceName)) + if err != nil { + logger.Error("Failed to create the channel service object", zap.Error(err)) + return nil, err + } + svc, err = r.KubeClientSet.CoreV1().Services(channel.Namespace).Create(svc) + if err != nil { + logger.Error("Failed to create the channel service", zap.Error(err)) + return nil, err + } + return svc, nil + } else { + logger.Error("Unable to get the channel service", zap.Error(err)) + } + return nil, err + } + // Check to make sure that the KafkaChannel owns this service and if not, complain. + if !metav1.IsControlledBy(svc, channel) { + return nil, fmt.Errorf("kafkachannel: %s/%s does not own Service: %q", channel.Namespace, channel.Name, svc.Name) + } + return svc, nil +} + +func (r *Reconciler) updateStatus(ctx context.Context, desired *v1alpha1.KafkaChannel) (*v1alpha1.KafkaChannel, error) { + kc, err := r.kafkachannelLister.KafkaChannels(desired.Namespace).Get(desired.Name) + if err != nil { + return nil, err + } + + if reflect.DeepEqual(kc.Status, desired.Status) { + return kc, nil + } + + becomesReady := desired.Status.IsReady() && !kc.Status.IsReady() + + // Don't modify the informers copy. 
+ existing := kc.DeepCopy() + existing.Status = desired.Status + + new, err := r.eventingClientSet.MessagingV1alpha1().KafkaChannels(desired.Namespace).UpdateStatus(existing) + if err == nil && becomesReady { + duration := time.Since(new.ObjectMeta.CreationTimestamp.Time) + r.Logger.Infof("KafkaChannel %q became ready after %v", kc.Name, duration) + if err := r.StatsReporter.ReportReady("Channel", kc.Namespace, kc.Name, duration); err != nil { + r.Logger.Infof("Failed to record ready for KafkaChannel %q: %v", kc.Name, err) + } + } + return new, err +} + +func (r *Reconciler) createClient(ctx context.Context, kc *v1alpha1.KafkaChannel) (sarama.ClusterAdmin, error) { + // We don't currently initialize r.kafkaClusterAdmin, hence we end up creating the cluster admin client every time. + // This is because of an issue with Shopify/sarama. See https://github.com/Shopify/sarama/issues/1162. + // Once the issue is fixed we should use a shared cluster admin client. Also, r.kafkaClusterAdmin is currently + // used to pass a fake admin client in the tests. 
+ kafkaClusterAdmin := r.kafkaClusterAdmin + if kafkaClusterAdmin == nil { + var err error + kafkaClusterAdmin, err = resources.MakeClient(controllerAgentName, r.kafkaConfig.Brokers) + if err != nil { + return nil, err + } + } + return kafkaClusterAdmin, nil +} + +func (r *Reconciler) createTopic(ctx context.Context, channel *v1alpha1.KafkaChannel, kafkaClusterAdmin sarama.ClusterAdmin) error { + logger := logging.FromContext(ctx) + + topicName := resources.MakeTopicName(channel) + logger.Info("Creating topic on Kafka cluster", zap.String("topic", topicName)) + err := kafkaClusterAdmin.CreateTopic(topicName, &sarama.TopicDetail{ + ReplicationFactor: channel.Spec.ReplicationFactor, + NumPartitions: channel.Spec.NumPartitions, + }, false) + if err == sarama.ErrTopicAlreadyExists { + return nil + } else if err != nil { + logger.Error("Error creating topic", zap.String("topic", topicName), zap.Error(err)) + } else { + logger.Info("Successfully created topic", zap.String("topic", topicName)) + } + return err +} + +func (r *Reconciler) deleteTopic(ctx context.Context, channel *v1alpha1.KafkaChannel, kafkaClusterAdmin sarama.ClusterAdmin) error { + logger := logging.FromContext(ctx) + + topicName := resources.MakeTopicName(channel) + logger.Info("Deleting topic on Kafka Cluster", zap.String("topic", topicName)) + err := kafkaClusterAdmin.DeleteTopic(topicName) + if err == sarama.ErrUnknownTopicOrPartition { + return nil + } else if err != nil { + logger.Error("Error deleting topic", zap.String("topic", topicName), zap.Error(err)) + } else { + logger.Info("Successfully deleted topic", zap.String("topic", topicName)) + } + return err +} + +func (r *Reconciler) ensureFinalizer(channel *v1alpha1.KafkaChannel) error { + finalizers := sets.NewString(channel.Finalizers...) 
+ if finalizers.Has(finalizerName) { + return nil + } + + mergePatch := map[string]interface{}{ + "metadata": map[string]interface{}{ + "finalizers": append(channel.Finalizers, finalizerName), + "resourceVersion": channel.ResourceVersion, + }, + } + + patch, err := json.Marshal(mergePatch) + if err != nil { + return err + } + + _, err = r.eventingClientSet.MessagingV1alpha1().KafkaChannels(channel.Namespace).Patch(channel.Name, types.MergePatchType, patch) + return err +} + +func removeFinalizer(channel *v1alpha1.KafkaChannel) { + finalizers := sets.NewString(channel.Finalizers...) + finalizers.Delete(finalizerName) + channel.Finalizers = finalizers.List() +} diff --git a/contrib/kafka/pkg/reconciler/kafkachannel_test.go b/contrib/kafka/pkg/reconciler/kafkachannel_test.go new file mode 100644 index 00000000000..374a28ac1fa --- /dev/null +++ b/contrib/kafka/pkg/reconciler/kafkachannel_test.go @@ -0,0 +1,433 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package reconciler + +import ( + "fmt" + . 
"github.com/knative/eventing/contrib/kafka/pkg/utils" + "testing" + + "github.com/knative/eventing/contrib/kafka/pkg/apis/messaging/v1alpha1" + fakeclientset "github.com/knative/eventing/contrib/kafka/pkg/client/clientset/versioned/fake" + informers "github.com/knative/eventing/contrib/kafka/pkg/client/informers/externalversions" + "github.com/knative/eventing/pkg/reconciler" + reconciletesting "github.com/knative/eventing/pkg/reconciler/testing" + "github.com/knative/eventing/pkg/utils" + duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1" + "github.com/knative/pkg/controller" + "github.com/knative/pkg/kmeta" + logtesting "github.com/knative/pkg/logging/testing" + . "github.com/knative/pkg/reconciler/testing" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kubeinformers "k8s.io/client-go/informers" + fakekubeclientset "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/kubernetes/scheme" +) + +const ( + systemNS = "knative-eventing" + testNS = "test-namespace" + kcName = "test-kc" + dispatcherDeploymentName = "test-deployment" + dispatcherServiceName = "test-service" + channelServiceAddress = "test-kc-kn-channel.test-namespace.svc.cluster.local" + + subscriberAPIVersion = "v1" + subscriberKind = "Service" + subscriberName = "subscriberName" + subscriberURI = "http://example.com/subscriber" +) + +var ( + trueVal = true + // deletionTime is used when objects are marked as deleted. Rfc3339Copy() + // truncates to seconds to match the loss of precision during serialization. + deletionTime = metav1.Now().Rfc3339Copy() +) + +func init() { + // Add types to scheme + _ = v1alpha1.AddToScheme(scheme.Scheme) + _ = duckv1alpha1.AddToScheme(scheme.Scheme) +} + +func TestNewController(t *testing.T) { + kubeClient := fakekubeclientset.NewSimpleClientset() + eventingClient := fakeclientset.NewSimpleClientset() + + // Create informer factories with fake clients. 
The second parameter sets the + // resync period to zero, disabling it. + kubeInformerFactory := kubeinformers.NewSharedInformerFactory(kubeClient, 0) + eventingInformerFactory := informers.NewSharedInformerFactory(eventingClient, 0) + + kafkaChannelInformer := eventingInformerFactory.Messaging().V1alpha1().KafkaChannels() + + // Kube + serviceInformer := kubeInformerFactory.Core().V1().Services() + endpointsInformer := kubeInformerFactory.Core().V1().Endpoints() + deploymentInformer := kubeInformerFactory.Apps().V1().Deployments() + + kafkaConfig := &KafkaConfig{ + Brokers: []string{"boostrap_server"}, + } + + c := NewController( + reconciler.Options{ + KubeClientSet: kubeClient, + Logger: logtesting.TestLogger(t), + }, + nil, // TODO fix this + kafkaConfig, + systemNS, + dispatcherDeploymentName, + dispatcherServiceName, + kafkaChannelInformer, + deploymentInformer, + serviceInformer, + endpointsInformer) + + if c == nil { + t.Fatalf("Failed to create with NewController") + } +} + +func TestAllCases(t *testing.T) { + // imcKey := testNS + "/" + kcName + table := TableTest{ + { + Name: "bad workqueue key", + // Make sure Reconcile handles bad keys. + Key: "too/many/parts", + }, + //}, { + // Name: "key not found", + // // Make sure Reconcile handles good keys that don't exist. 
+ // Key: "foo/not-found", + //}, + // }, { // TODO: there is a bug in the controller, it will query for "" + // Name: "trigger key not found ", + // Objects: []runtime.Object{ + // reconciletesting.NewTrigger(triggerName, testNS), + // }, + // Key: "foo/incomplete", + // WantErr: true, + // WantEvents: []string{ + // Eventf(corev1.EventTypeWarning, "ChannelReferenceFetchFailed", "Failed to validate spec.channel exists: s \"\" not found"), + // }, + //}, { + // Name: "deleting", + // Key: imcKey, + // Objects: []runtime.Object{ + // reconciletesting.NewInMemoryChannel(imcName, testNS, + // reconciletesting.WithInitInMemoryChannelConditions, + // reconciletesting.WithInMemoryChannelDeleted)}, + // WantErr: false, + // WantEvents: []string{ + // Eventf(corev1.EventTypeNormal, "Reconciled", "InMemoryChannel reconciled"), + // }, + //}, { + // Name: "deployment does not exist", + // Key: imcKey, + // Objects: []runtime.Object{ + // reconciletesting.NewInMemoryChannel(imcName, testNS), + // }, + // WantErr: true, + // WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + // Object: reconciletesting.NewInMemoryChannel(imcName, testNS, + // reconciletesting.WithInitInMemoryChannelConditions, + // reconciletesting.WithInMemoryChannelDeploymentNotReady("DispatcherDeploymentDoesNotExist", "Dispatcher Deployment does not exist")), + // }}, + // WantEvents: []string{ + // Eventf(corev1.EventTypeWarning, "ReconcileFailed", "InMemoryChannel reconciliation failed: deployment.apps \"test-deployment\" not found"), + // }, + //}, { + // Name: "Service does not exist", + // Key: imcKey, + // Objects: []runtime.Object{ + // makeReadyDeployment(), + // reconciletesting.NewInMemoryChannel(imcName, testNS), + // }, + // WantErr: true, + // WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + // Object: reconciletesting.NewInMemoryChannel(imcName, testNS, + // reconciletesting.WithInitInMemoryChannelConditions, + // reconciletesting.WithInMemoryChannelDeploymentReady(), + // 
reconciletesting.WithInMemoryChannelServicetNotReady("DispatcherServiceDoesNotExist", "Dispatcher Service does not exist")), + // }}, + // WantEvents: []string{ + // Eventf(corev1.EventTypeWarning, "ReconcileFailed", "InMemoryChannel reconciliation failed: service \"test-service\" not found"), + // }, + //}, { + // Name: "Endpoints does not exist", + // Key: imcKey, + // Objects: []runtime.Object{ + // makeReadyDeployment(), + // makeService(), + // reconciletesting.NewInMemoryChannel(imcName, testNS), + // }, + // WantErr: true, + // WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + // Object: reconciletesting.NewInMemoryChannel(imcName, testNS, + // reconciletesting.WithInitInMemoryChannelConditions, + // reconciletesting.WithInMemoryChannelDeploymentReady(), + // reconciletesting.WithInMemoryChannelServiceReady(), + // reconciletesting.WithInMemoryChannelEndpointsNotReady("DispatcherEndpointsDoesNotExist", "Dispatcher Endpoints does not exist"), + // ), + // }}, + // WantEvents: []string{ + // Eventf(corev1.EventTypeWarning, "ReconcileFailed", "InMemoryChannel reconciliation failed: endpoints \"test-service\" not found"), + // }, + //}, { + // Name: "Endpoints not ready", + // Key: imcKey, + // Objects: []runtime.Object{ + // makeReadyDeployment(), + // makeService(), + // makeEmptyEndpoints(), + // reconciletesting.NewInMemoryChannel(imcName, testNS), + // }, + // WantErr: true, + // WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + // Object: reconciletesting.NewInMemoryChannel(imcName, testNS, + // reconciletesting.WithInitInMemoryChannelConditions, + // reconciletesting.WithInMemoryChannelDeploymentReady(), + // reconciletesting.WithInMemoryChannelServiceReady(), + // reconciletesting.WithInMemoryChannelEndpointsNotReady("DispatcherEndpointsNotReady", "There are no endpoints ready for Dispatcher service"), + // ), + // }}, + // WantEvents: []string{ + // Eventf(corev1.EventTypeWarning, "ReconcileFailed", "InMemoryChannel reconciliation 
failed: there are no endpoints ready for Dispatcher service"), + // }, + //}, { + // Name: "Works, creates new channel", + // Key: imcKey, + // Objects: []runtime.Object{ + // makeReadyDeployment(), + // makeService(), + // makeReadyEndpoints(), + // reconciletesting.NewInMemoryChannel(imcName, testNS), + // }, + // WantErr: false, + // WantCreates: []metav1.Object{ + // makeChannelService(reconciletesting.NewInMemoryChannel(imcName, testNS)), + // }, + // WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + // Object: reconciletesting.NewInMemoryChannel(imcName, testNS, + // reconciletesting.WithInitInMemoryChannelConditions, + // reconciletesting.WithInMemoryChannelDeploymentReady(), + // reconciletesting.WithInMemoryChannelServiceReady(), + // reconciletesting.WithInMemoryChannelEndpointsReady(), + // reconciletesting.WithInMemoryChannelChannelServiceReady(), + // reconciletesting.WithInMemoryChannelAddress(channelServiceAddress), + // ), + // }}, + // WantEvents: []string{ + // Eventf(corev1.EventTypeNormal, "Reconciled", "InMemoryChannel reconciled"), + // }, + //}, { + // Name: "Works, channel exists", + // Key: imcKey, + // Objects: []runtime.Object{ + // makeReadyDeployment(), + // makeService(), + // makeReadyEndpoints(), + // reconciletesting.NewInMemoryChannel(imcName, testNS), + // makeChannelService(reconciletesting.NewInMemoryChannel(imcName, testNS)), + // }, + // WantErr: false, + // WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + // Object: reconciletesting.NewInMemoryChannel(imcName, testNS, + // reconciletesting.WithInitInMemoryChannelConditions, + // reconciletesting.WithInMemoryChannelDeploymentReady(), + // reconciletesting.WithInMemoryChannelServiceReady(), + // reconciletesting.WithInMemoryChannelEndpointsReady(), + // reconciletesting.WithInMemoryChannelChannelServiceReady(), + // reconciletesting.WithInMemoryChannelAddress(channelServiceAddress), + // ), + // }}, + // WantEvents: []string{ + // Eventf(corev1.EventTypeNormal, 
"Reconciled", "InMemoryChannel reconciled"), + // }, + //}, { + // Name: "channel exists, not owned by us", + // Key: imcKey, + // Objects: []runtime.Object{ + // makeReadyDeployment(), + // makeService(), + // makeReadyEndpoints(), + // reconciletesting.NewInMemoryChannel(imcName, testNS), + // makeChannelServiceNotOwnedByUs(reconciletesting.NewInMemoryChannel(imcName, testNS)), + // }, + // WantErr: true, + // WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + // Object: reconciletesting.NewInMemoryChannel(imcName, testNS, + // reconciletesting.WithInitInMemoryChannelConditions, + // reconciletesting.WithInMemoryChannelDeploymentReady(), + // reconciletesting.WithInMemoryChannelServiceReady(), + // reconciletesting.WithInMemoryChannelEndpointsReady(), + // reconciletesting.WithInMemoryChannelChannelServicetNotReady("ChannelServiceFailed", "Channel Service failed: inmemorychannel: test-namespace/test-imc does not own Service: \"test-imc-kn-channel\""), + // ), + // }}, + // WantEvents: []string{ + // Eventf(corev1.EventTypeWarning, "ReconcileFailed", "InMemoryChannel reconciliation failed: inmemorychannel: test-namespace/test-imc does not own Service: \"test-imc-kn-channel\""), + // }, + //}, { + // Name: "channel does not exist, fails to create", + // Key: imcKey, + // Objects: []runtime.Object{ + // makeReadyDeployment(), + // makeService(), + // makeReadyEndpoints(), + // reconciletesting.NewInMemoryChannel(imcName, testNS), + // }, + // WantErr: true, + // WithReactors: []clientgotesting.ReactionFunc{ + // InduceFailure("create", "Services"), + // }, + // WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + // Object: reconciletesting.NewInMemoryChannel(imcName, testNS, + // reconciletesting.WithInitInMemoryChannelConditions, + // reconciletesting.WithInMemoryChannelDeploymentReady(), + // reconciletesting.WithInMemoryChannelServiceReady(), + // reconciletesting.WithInMemoryChannelEndpointsReady(), + // 
reconciletesting.WithInMemoryChannelChannelServicetNotReady("ChannelServiceFailed", "Channel Service failed: inducing failure for create services"), + // ), + // }}, + // WantCreates: []metav1.Object{ + // makeChannelService(reconciletesting.NewInMemoryChannel(imcName, testNS)), + // }, + // WantEvents: []string{ + // Eventf(corev1.EventTypeWarning, "ReconcileFailed", "InMemoryChannel reconciliation failed: inducing failure for create services"), + // }, + //}, {}, + } + defer logtesting.ClearAll() + + table.Test(t, reconciletesting.MakeFactory(func(listers *reconciletesting.Listers, opt reconciler.Options) controller.Reconciler { + return &Reconciler{ + Base: reconciler.NewBase(opt, controllerAgentName), + dispatcherNamespace: testNS, + dispatcherDeploymentName: dispatcherDeploymentName, + dispatcherServiceName: dispatcherServiceName, + // TODO: Fix + kafkachannelLister: nil, + kafkachannelInformer: nil, + deploymentLister: listers.GetDeploymentLister(), + serviceLister: listers.GetServiceLister(), + endpointsLister: listers.GetEndpointsLister(), + } + }, + false, + )) +} + +func makeDeployment() *appsv1.Deployment { + return &appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "apps/v1", + Kind: "Deployment", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: testNS, + Name: dispatcherDeploymentName, + }, + Status: appsv1.DeploymentStatus{}, + } +} + +func makeReadyDeployment() *appsv1.Deployment { + d := makeDeployment() + d.Status.Conditions = []appsv1.DeploymentCondition{{Type: appsv1.DeploymentAvailable, Status: corev1.ConditionTrue}} + return d +} + +func makeService() *corev1.Service { + return &corev1.Service{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Service", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: testNS, + Name: dispatcherServiceName, + }, + } +} + +func makeChannelService(kc *v1alpha1.KafkaChannel) *corev1.Service { + return &corev1.Service{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Service", + }, + 
ObjectMeta: metav1.ObjectMeta{ + Namespace: testNS, + Name: fmt.Sprintf("%s-kn-channel", kcName), + Labels: map[string]string{ + "messaging.knative.dev/role": "kafka-channel", + }, + OwnerReferences: []metav1.OwnerReference{ + *kmeta.NewControllerRef(kc), + }, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeExternalName, + ExternalName: fmt.Sprintf("%s.%s.svc.%s", dispatcherServiceName, testNS, utils.GetClusterDomainName()), + }, + } +} + +func makeChannelServiceNotOwnedByUs(kc *v1alpha1.KafkaChannel) *corev1.Service { + return &corev1.Service{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Service", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: testNS, + Name: fmt.Sprintf("%s-kn-channel", kcName), + Labels: map[string]string{ + "messaging.knative.dev/role": "kafka-channel", + }, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeExternalName, + ExternalName: fmt.Sprintf("%s.%s.svc.%s", dispatcherServiceName, testNS, utils.GetClusterDomainName()), + }, + } +} + +func makeEmptyEndpoints() *corev1.Endpoints { + return &corev1.Endpoints{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Endpoints", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: testNS, + Name: dispatcherServiceName, + }, + } +} + +func makeReadyEndpoints() *corev1.Endpoints { + e := makeEmptyEndpoints() + e.Subsets = []corev1.EndpointSubset{{Addresses: []corev1.EndpointAddress{{IP: "1.1.1.1"}}}} + return e +} diff --git a/contrib/kafka/pkg/reconciler/resources/client.go b/contrib/kafka/pkg/reconciler/resources/client.go new file mode 100644 index 00000000000..d2af5852ecd --- /dev/null +++ b/contrib/kafka/pkg/reconciler/resources/client.go @@ -0,0 +1,28 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resources + +import ( + "github.com/Shopify/sarama" +) + +func MakeClient(clientID string, bootstrapServers []string) (sarama.ClusterAdmin, error) { + saramaConf := sarama.NewConfig() + saramaConf.Version = sarama.V1_1_0_0 + saramaConf.ClientID = clientID + return sarama.NewClusterAdmin(bootstrapServers, saramaConf) +} diff --git a/contrib/kafka/pkg/reconciler/resources/service.go b/contrib/kafka/pkg/reconciler/resources/service.go new file mode 100644 index 00000000000..58dad0589be --- /dev/null +++ b/contrib/kafka/pkg/reconciler/resources/service.go @@ -0,0 +1,96 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package resources + +import ( + "fmt" + + "github.com/knative/eventing/contrib/kafka/pkg/apis/messaging/v1alpha1" + "github.com/knative/eventing/pkg/utils" + "github.com/knative/pkg/kmeta" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + portName = "http" + portNumber = 80 + MessagingRoleLabel = "messaging.knative.dev/role" + MessagingRole = "kafka-channel" +) + +// ServiceOption can be used to optionally modify the K8s service in MakeK8sService. +type ServiceOption func(*corev1.Service) error + +func MakeExternalServiceAddress(namespace, service string) string { + return fmt.Sprintf("%s.%s.svc.%s", service, namespace, utils.GetClusterDomainName()) +} + +func MakeChannelServiceName(name string) string { + return fmt.Sprintf("%s-kn-channel", name) +} + +// ExternalService is a functional option for MakeK8sService to create a K8s service of type ExternalName +// pointing to the specified service in a namespace. +func ExternalService(namespace, service string) ServiceOption { + return func(svc *corev1.Service) error { + // TODO this overrides the current serviceSpec. Is this correct? + svc.Spec = corev1.ServiceSpec{ + Type: corev1.ServiceTypeExternalName, + ExternalName: MakeExternalServiceAddress(namespace, service), + } + return nil + } +} + +// MakeK8sService creates a new K8s Service for a Channel resource. It also sets the appropriate +// OwnerReferences on the resource so handleObject can discover the Channel resource that 'owns' it. +// As well as being garbage collected when the Channel is deleted. 
+func MakeK8sService(kc *v1alpha1.KafkaChannel, opts ...ServiceOption) (*corev1.Service, error) { + // Add annotations + svc := &corev1.Service{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Service", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: MakeChannelServiceName(kc.ObjectMeta.Name), + Namespace: kc.Namespace, + Labels: map[string]string{ + MessagingRoleLabel: MessagingRole, + }, + OwnerReferences: []metav1.OwnerReference{ + *kmeta.NewControllerRef(kc), + }, + }, + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{ + { + Name: portName, + Protocol: corev1.ProtocolTCP, + Port: portNumber, + }, + }, + }, + } + for _, opt := range opts { + if err := opt(svc); err != nil { + return nil, err + } + } + return svc, nil +} diff --git a/contrib/kafka/pkg/reconciler/resources/service_test.go b/contrib/kafka/pkg/reconciler/resources/service_test.go new file mode 100644 index 00000000000..66dac7ff372 --- /dev/null +++ b/contrib/kafka/pkg/reconciler/resources/service_test.go @@ -0,0 +1,143 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package resources + +import ( + "errors" + "fmt" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/knative/eventing/contrib/kafka/pkg/apis/messaging/v1alpha1" + "github.com/knative/pkg/kmeta" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + serviceName = "my-test-service" + kcName = "my-test-kc" + testNS = "my-test-ns" + dispatcherNS = "dispatcher-namespace" + dispatcherName = "dispatcher-name" +) + +func TestMakeExternalServiceAddress(t *testing.T) { + if want, got := "my-test-service.my-test-ns.svc.cluster.local", MakeExternalServiceAddress(testNS, serviceName); want != got { + t.Errorf("Want: %q got %q", want, got) + } +} + +func TestMakeChannelServiceAddress(t *testing.T) { + if want, got := "my-test-kc-kn-channel", MakeChannelServiceName(kcName); want != got { + t.Errorf("Want: %q got %q", want, got) + } +} + +func TestMakeService(t *testing.T) { + imc := &v1alpha1.KafkaChannel{ + ObjectMeta: metav1.ObjectMeta{ + Name: kcName, + Namespace: testNS, + }, + } + want := &corev1.Service{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Service", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-kn-channel", kcName), + Namespace: testNS, + Labels: map[string]string{ + MessagingRoleLabel: MessagingRole, + }, + OwnerReferences: []metav1.OwnerReference{ + *kmeta.NewControllerRef(imc), + }, + }, + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{ + { + Name: portName, + Protocol: corev1.ProtocolTCP, + Port: portNumber, + }, + }, + }, + } + + got, err := MakeK8sService(imc) + if err != nil { + t.Fatalf("Failed to create new service: %s", err) + } + + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("unexpected condition (-want, +got) = %v", diff) + } +} + +func TestMakeServiceWithExternal(t *testing.T) { + imc := &v1alpha1.KafkaChannel{ + ObjectMeta: metav1.ObjectMeta{ + Name: kcName, + Namespace: testNS, + }, + } + want := &corev1.Service{ + TypeMeta: metav1.TypeMeta{ + 
APIVersion: "v1", + Kind: "Service", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-kn-channel", kcName), + Namespace: testNS, + Labels: map[string]string{ + MessagingRoleLabel: MessagingRole, + }, + OwnerReferences: []metav1.OwnerReference{ + *kmeta.NewControllerRef(imc), + }, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeExternalName, + ExternalName: "dispatcher-name.dispatcher-namespace.svc.cluster.local", + }, + } + + got, err := MakeK8sService(imc, ExternalService(dispatcherNS, dispatcherName)) + if err != nil { + t.Fatalf("Failed to create new service: %s", err) + } + + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("unexpected condition (-want, +got) = %v", diff) + } +} + +func TestMakeServiceWithFailingOption(t *testing.T) { + imc := &v1alpha1.KafkaChannel{ + ObjectMeta: metav1.ObjectMeta{ + Name: kcName, + Namespace: testNS, + }, + } + _, err := MakeK8sService(imc, func(svc *corev1.Service) error { return errors.New("test-induced failure") }) + if err == nil { + t.Fatalf("Expcted error from new service but got none") + } +} diff --git a/contrib/kafka/pkg/reconciler/resources/topic.go b/contrib/kafka/pkg/reconciler/resources/topic.go new file mode 100644 index 00000000000..81d756c2d74 --- /dev/null +++ b/contrib/kafka/pkg/reconciler/resources/topic.go @@ -0,0 +1,30 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package resources + +import ( + "fmt" + "github.com/knative/eventing/contrib/kafka/pkg/apis/messaging/v1alpha1" +) + +const ( + knativeKafkaTopicPrefix = "knative-messaging-kafka" +) + +func MakeTopicName(channel *v1alpha1.KafkaChannel) string { + return fmt.Sprintf("%s.%s.%s", knativeKafkaTopicPrefix, channel.Namespace, channel.Name) +} diff --git a/contrib/kafka/pkg/utils/util.go b/contrib/kafka/pkg/utils/util.go new file mode 100644 index 00000000000..408132b087a --- /dev/null +++ b/contrib/kafka/pkg/utils/util.go @@ -0,0 +1,86 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package utils + +import ( + "fmt" + "log" + "strings" + + cluster "github.com/bsm/sarama-cluster" + + "github.com/knative/pkg/configmap" +) + +const ( + BrokerConfigMapKey = "bootstrap_servers" + ConsumerModeConfigMapKey = "consumer_mode" + ConsumerModePartitionConsumerValue = "partitions" + ConsumerModeMultiplexConsumerValue = "multiplex" + KafkaChannelSeparator = "." + + // DefaultNumPartitions defines the default number of partitions + DefaultNumPartitions = 1 + + // DefaultReplicationFactor defines the default number of replications + DefaultReplicationFactor = 1 +) + +type KafkaConfig struct { + Brokers []string + ConsumerMode cluster.ConsumerMode +} + +// GetKafkaConfig returns the details of the Kafka cluster. 
+func GetKafkaConfig(path string) (*KafkaConfig, error) { + configMap, err := configmap.Load(path) + if err != nil { + return nil, fmt.Errorf("error loading configuration: %s", err) + } + + if len(configMap) == 0 { + return nil, fmt.Errorf("missing configuration") + } + + config := &KafkaConfig{} + + if brokers, ok := configMap[BrokerConfigMapKey]; ok { + bootstrapServers := strings.Split(brokers, ",") + for _, s := range bootstrapServers { + if len(s) == 0 { + return nil, fmt.Errorf("empty %s value in configuration", BrokerConfigMapKey) + } + } + config.Brokers = bootstrapServers + } else { + return nil, fmt.Errorf("missing key %s in configuration", BrokerConfigMapKey) + } + + config.ConsumerMode = cluster.ConsumerModeMultiplex + if mode, ok := configMap[ConsumerModeConfigMapKey]; ok { + switch strings.ToLower(mode) { + case ConsumerModeMultiplexConsumerValue: + config.ConsumerMode = cluster.ConsumerModeMultiplex + case ConsumerModePartitionConsumerValue: + config.ConsumerMode = cluster.ConsumerModePartitions + default: + log.Printf("consumer_mode: %q is invalid. Using default mode %q", mode, ConsumerModeMultiplexConsumerValue) + config.ConsumerMode = cluster.ConsumerModeMultiplex + } + } + return config, nil +} diff --git a/contrib/kafka/pkg/controller/util_test.go b/contrib/kafka/pkg/utils/util_test.go similarity index 72% rename from contrib/kafka/pkg/controller/util_test.go rename to contrib/kafka/pkg/utils/util_test.go index 526f183bd86..f599351f2bd 100644 --- a/contrib/kafka/pkg/controller/util_test.go +++ b/contrib/kafka/pkg/utils/util_test.go @@ -1,4 +1,20 @@ -package controller +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package utils import ( "io/ioutil" @@ -12,53 +28,53 @@ import ( _ "github.com/knative/pkg/system/testing" ) -func TestGetProvisionerConfigBrokers(t *testing.T) { +func TestGetKafkaConfig(t *testing.T) { testCases := []struct { name string data map[string]string path string getError string - expected *KafkaProvisionerConfig + expected *KafkaConfig }{ { name: "invalid config path", path: "/tmp/does_not_exist", - getError: "error loading provisioner configuration: lstat /tmp/does_not_exist: no such file or directory", + getError: "error loading configuration: lstat /tmp/does_not_exist: no such file or directory", }, { name: "configmap with no data", data: map[string]string{}, - getError: "missing provisioner configuration", + getError: "missing configuration", }, { name: "configmap with no bootstrap_servers key", data: map[string]string{"key": "value"}, - getError: "missing key bootstrap_servers in provisioner configuration", + getError: "missing key bootstrap_servers in configuration", }, { name: "configmap with empty bootstrap_servers value", data: map[string]string{"bootstrap_servers": ""}, - getError: "empty bootstrap_servers value in provisioner configuration", + getError: "empty bootstrap_servers value in configuration", }, { name: "single bootstrap_servers", data: map[string]string{"bootstrap_servers": "kafkabroker.kafka:9092"}, - expected: &KafkaProvisionerConfig{ + expected: &KafkaConfig{ Brokers: []string{"kafkabroker.kafka:9092"}, }, }, { name: "multiple bootstrap_servers", data: map[string]string{"bootstrap_servers": 
"kafkabroker1.kafka:9092,kafkabroker2.kafka:9092"}, - expected: &KafkaProvisionerConfig{ + expected: &KafkaConfig{ Brokers: []string{"kafkabroker1.kafka:9092", "kafkabroker2.kafka:9092"}, }, }, { name: "partition consumer", data: map[string]string{"bootstrap_servers": "kafkabroker.kafka:9092", "consumer_mode": "partitions"}, - expected: &KafkaProvisionerConfig{ + expected: &KafkaConfig{ Brokers: []string{"kafkabroker.kafka:9092"}, ConsumerMode: cluster.ConsumerModePartitions, }, @@ -66,7 +82,7 @@ func TestGetProvisionerConfigBrokers(t *testing.T) { { name: "default multiplex", data: map[string]string{"bootstrap_servers": "kafkabroker.kafka:9092", "consumer_mode": "multiplex"}, - expected: &KafkaProvisionerConfig{ + expected: &KafkaConfig{ Brokers: []string{"kafkabroker.kafka:9092"}, ConsumerMode: cluster.ConsumerModeMultiplex, }, @@ -74,7 +90,7 @@ func TestGetProvisionerConfigBrokers(t *testing.T) { { name: "default multiplex from invalid consumer_mode", data: map[string]string{"bootstrap_servers": "kafkabroker.kafka:9092", "consumer_mode": "foo"}, - expected: &KafkaProvisionerConfig{ + expected: &KafkaConfig{ Brokers: []string{"kafkabroker.kafka:9092"}, ConsumerMode: cluster.ConsumerModeMultiplex, }, @@ -103,7 +119,7 @@ func TestGetProvisionerConfigBrokers(t *testing.T) { tc.path = dir } - got, err := GetProvisionerConfig(tc.path) + got, err := GetKafkaConfig(tc.path) if tc.getError != "" { if err == nil { diff --git a/hack/update-codegen.sh b/hack/update-codegen.sh index b7e3fc9e7ab..d638b309d80 100755 --- a/hack/update-codegen.sh +++ b/hack/update-codegen.sh @@ -37,5 +37,18 @@ ${CODEGEN_PKG}/generate-groups.sh "deepcopy" \ "duck:v1alpha1" \ --go-header-file ${REPO_ROOT_DIR}/hack/boilerplate/boilerplate.go.txt +CONTRIB_DIRS=(contrib/kafka/pkg) + +for DIR in "${CONTRIB_DIRS[@]}"; do + # generate the code with: + # --output-base because this script should also be able to run inside the vendor dir of + # k8s.io/kubernetes. 
The output-base is needed for the generators to output into the vendor dir + # instead of the $GOPATH directly. For normal projects this can be dropped. + ${CODEGEN_PKG}/generate-groups.sh "deepcopy,client,informer,lister" \ + github.com/knative/eventing/${DIR}/client github.com/knative/eventing/${DIR}/apis \ + "messaging:v1alpha1" \ + --go-header-file ${REPO_ROOT_DIR}/hack/boilerplate/boilerplate.go.txt +done + # Make sure our dependencies are up-to-date ${REPO_ROOT_DIR}/hack/update-deps.sh From 5b8d417b9eeb9e614fd76cee30f57f1b70f04bac Mon Sep 17 00:00:00 2001 From: nachocano Date: Wed, 22 May 2019 11:38:31 -0700 Subject: [PATCH 30/64] cosmetics --- contrib/kafka/config/200-controller-clusterrole.yaml | 2 +- contrib/kafka/pkg/reconciler/kafkachannel.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/contrib/kafka/config/200-controller-clusterrole.yaml b/contrib/kafka/config/200-controller-clusterrole.yaml index ac144b8adff..40f6022ff55 100644 --- a/contrib/kafka/config/200-controller-clusterrole.yaml +++ b/contrib/kafka/config/200-controller-clusterrole.yaml @@ -49,7 +49,7 @@ rules: resources: - configmaps resourceNames: - - kafka-dispatcher + - kafka-ch-dispatcher verbs: - update - apiGroups: diff --git a/contrib/kafka/pkg/reconciler/kafkachannel.go b/contrib/kafka/pkg/reconciler/kafkachannel.go index d45eef0e0bb..877afa2fc1d 100644 --- a/contrib/kafka/pkg/reconciler/kafkachannel.go +++ b/contrib/kafka/pkg/reconciler/kafkachannel.go @@ -54,7 +54,7 @@ const ( // controllerAgentName is the string used by this controller to identify // itself when creating events. 
- controllerAgentName = "kafka-controller" + controllerAgentName = "kafka-ch-controller" finalizerName = controllerAgentName From be46c2d70f999e4665dc290a471ac34a766f395f Mon Sep 17 00:00:00 2001 From: nachocano Date: Wed, 22 May 2019 11:44:20 -0700 Subject: [PATCH 31/64] register as other projects --- contrib/kafka/pkg/apis/messaging/register.go | 21 +++++++++++++++++++ .../pkg/apis/messaging/v1alpha1/register.go | 7 ++----- 2 files changed, 23 insertions(+), 5 deletions(-) create mode 100644 contrib/kafka/pkg/apis/messaging/register.go diff --git a/contrib/kafka/pkg/apis/messaging/register.go b/contrib/kafka/pkg/apis/messaging/register.go new file mode 100644 index 00000000000..8f678adcd23 --- /dev/null +++ b/contrib/kafka/pkg/apis/messaging/register.go @@ -0,0 +1,21 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package messaging + +const ( + GroupName = "messaging.knative.dev" +) diff --git a/contrib/kafka/pkg/apis/messaging/v1alpha1/register.go b/contrib/kafka/pkg/apis/messaging/v1alpha1/register.go index ba2eb8d3883..e320ce74970 100644 --- a/contrib/kafka/pkg/apis/messaging/v1alpha1/register.go +++ b/contrib/kafka/pkg/apis/messaging/v1alpha1/register.go @@ -17,17 +17,14 @@ limitations under the License. 
package v1alpha1 import ( + "github.com/knative/eventing/contrib/kafka/pkg/apis/messaging" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" ) -const ( - groupName = "messaging.knative.dev" -) - // SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: groupName, Version: "v1alpha1"} +var SchemeGroupVersion = schema.GroupVersion{Group: messaging.GroupName, Version: "v1alpha1"} // Kind takes an unqualified kind and returns back a Group qualified GroupKind func Kind(kind string) schema.GroupKind { From de3470b09ffc12d65e435b53bbae5fc9bf9204d7 Mon Sep 17 00:00:00 2001 From: nachocano Date: Wed, 22 May 2019 13:56:09 -0700 Subject: [PATCH 32/64] sockpuppet --- contrib/kafka/config/200-controller-clusterrole.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/kafka/config/200-controller-clusterrole.yaml b/contrib/kafka/config/200-controller-clusterrole.yaml index 40f6022ff55..2cb407bde1c 100644 --- a/contrib/kafka/config/200-controller-clusterrole.yaml +++ b/contrib/kafka/config/200-controller-clusterrole.yaml @@ -56,7 +56,7 @@ rules: - "" # Core API Group. resources: - events - verbs: + verbs: - create - patch - update From 6e57e7a3d20c6825495d03bb90e5cc7631c781ef Mon Sep 17 00:00:00 2001 From: nachocano Date: Wed, 22 May 2019 17:00:54 -0700 Subject: [PATCH 33/64] removing channelInformer from subscription controller as it is not needed. 
--- cmd/controller/main.go | 1 - pkg/reconciler/subscription/subscription.go | 9 --------- pkg/reconciler/subscription/subscription_test.go | 3 +-- 3 files changed, 1 insertion(+), 12 deletions(-) diff --git a/cmd/controller/main.go b/cmd/controller/main.go index 2444b02d79e..59387fc6484 100644 --- a/cmd/controller/main.go +++ b/cmd/controller/main.go @@ -106,7 +106,6 @@ func main() { subscription.NewController( opt, subscriptionInformer, - channelInformer, addressableInformer, ), namespace.NewController( diff --git a/pkg/reconciler/subscription/subscription.go b/pkg/reconciler/subscription/subscription.go index 60124e19b06..1bd8bbf9171 100644 --- a/pkg/reconciler/subscription/subscription.go +++ b/pkg/reconciler/subscription/subscription.go @@ -82,7 +82,6 @@ var _ controller.Reconciler = (*Reconciler)(nil) func NewController( opt reconciler.Options, subscriptionInformer eventinginformers.SubscriptionInformer, - channelInformer eventinginformers.ChannelInformer, addressableInformer eventingduck.AddressableInformer, ) *controller.Impl { @@ -99,14 +98,6 @@ func NewController( // Tracker is used to notify us when the resources Subscription depends on change, so that the // Subscription needs to reconcile again. r.tracker = tracker.New(impl.EnqueueKey, opt.GetTrackerLease()) - channelInformer.Informer().AddEventHandler(reconciler.Handler( - // Call the tracker's OnChanged method, but we've seen the objects coming through this path - // missing TypeMeta, so ensure it is properly populated. 
- controller.EnsureTypeMeta( - r.tracker.OnChanged, - v1alpha1.SchemeGroupVersion.WithKind("Channel"), - ), - )) return impl } diff --git a/pkg/reconciler/subscription/subscription_test.go b/pkg/reconciler/subscription/subscription_test.go index 39f1b3d401e..e7794af4e67 100644 --- a/pkg/reconciler/subscription/subscription_test.go +++ b/pkg/reconciler/subscription/subscription_test.go @@ -627,13 +627,12 @@ func TestNew(t *testing.T) { eventingInformer := informers.NewSharedInformerFactory(eventingClient, 0) subscriptionInformer := eventingInformer.Eventing().V1alpha1().Subscriptions() - channelInformer := eventingInformer.Eventing().V1alpha1().Channels() addressableInformer := &fakeAddressableInformer{} c := NewController(reconciler.Options{ KubeClientSet: kubeClient, EventingClientSet: eventingClient, Logger: logtesting.TestLogger(t), - }, subscriptionInformer, channelInformer, addressableInformer) + }, subscriptionInformer, addressableInformer) if c == nil { t.Fatal("Expected NewController to return a non-nil value") From cf934f9ddd966c2d625354cad96c6a6d7312e433 Mon Sep 17 00:00:00 2001 From: nachocano Date: Wed, 22 May 2019 17:12:27 -0700 Subject: [PATCH 34/64] properly mark subscription as not ready --- .../eventing/v1alpha1/subscription_lifecycle.go | 14 ++++++++++++++ pkg/apis/eventing/v1alpha1/subscription_types.go | 4 ---- pkg/reconciler/subscription/subscription.go | 4 ++++ 3 files changed, 18 insertions(+), 4 deletions(-) diff --git a/pkg/apis/eventing/v1alpha1/subscription_lifecycle.go b/pkg/apis/eventing/v1alpha1/subscription_lifecycle.go index 193673d0adc..5c996fd4df8 100644 --- a/pkg/apis/eventing/v1alpha1/subscription_lifecycle.go +++ b/pkg/apis/eventing/v1alpha1/subscription_lifecycle.go @@ -18,6 +18,10 @@ package v1alpha1 import duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1" +// subCondSet is a condition set with Ready as the happy condition and +// ReferencesResolved and ChannelReady as the dependent conditions. 
+var subCondSet = duckv1alpha1.NewLivingConditionSet(SubscriptionConditionReferencesResolved, SubscriptionConditionChannelReady)
+
 const (
 	// SubscriptionConditionReady has status True when all subconditions below have been set to True.
 	SubscriptionConditionReady = duckv1alpha1.ConditionReady
@@ -54,3 +58,13 @@ func (ss *SubscriptionStatus) MarkReferencesResolved() {
 func (ss *SubscriptionStatus) MarkChannelReady() {
 	subCondSet.Manage(ss).MarkTrue(SubscriptionConditionChannelReady)
 }
+
+// MarkReferencesNotResolved sets the ReferencesResolved condition to False state.
+func (ss *SubscriptionStatus) MarkReferencesNotResolved(reason, messageFormat string, messageA ...interface{}) {
+	subCondSet.Manage(ss).MarkFalse(SubscriptionConditionReferencesResolved, reason, messageFormat, messageA...)
+}
+
+// MarkChannelNotReady sets the ChannelReady condition to False state.
+func (ss *SubscriptionStatus) MarkChannelNotReady(reason, messageFormat string, messageA ...interface{}) {
+	subCondSet.Manage(ss).MarkFalse(SubscriptionConditionChannelReady, reason, messageFormat, messageA...)
+}
diff --git a/pkg/apis/eventing/v1alpha1/subscription_types.go b/pkg/apis/eventing/v1alpha1/subscription_types.go
index 013e2fab0c6..fd2166bfa23 100644
--- a/pkg/apis/eventing/v1alpha1/subscription_types.go
+++ b/pkg/apis/eventing/v1alpha1/subscription_types.go
@@ -165,10 +165,6 @@ type ReplyStrategy struct {
 	Channel *corev1.ObjectReference `json:"channel,omitempty"`
 }
 
-// subCondSet is a condition set with Ready as the happy condition and
-// ReferencesResolved and ChannelReady as the dependent conditions.
-var subCondSet = duckv1alpha1.NewLivingConditionSet(SubscriptionConditionReferencesResolved, SubscriptionConditionChannelReady) - // SubscriptionStatus (computed) for a subscription type SubscriptionStatus struct { // inherits duck/v1alpha1 Status, which currently provides: diff --git a/pkg/reconciler/subscription/subscription.go b/pkg/reconciler/subscription/subscription.go index 1bd8bbf9171..42707a2e195 100644 --- a/pkg/reconciler/subscription/subscription.go +++ b/pkg/reconciler/subscription/subscription.go @@ -176,6 +176,7 @@ func (r *Reconciler) reconcile(ctx context.Context, subscription *v1alpha1.Subsc zap.Error(err), zap.Any("channel", subscription.Spec.Channel)) r.Recorder.Eventf(subscription, corev1.EventTypeWarning, channelReferenceFetchFailed, "Failed to validate spec.channel exists: %v", err) + subscription.Status.MarkReferencesNotResolved(channelReferenceFetchFailed, "Failed to validate spec.channel exists: %v", err) return err } @@ -191,6 +192,7 @@ func (r *Reconciler) reconcile(ctx context.Context, subscription *v1alpha1.Subsc zap.Error(err), zap.Any("subscriber", subscription.Spec.Subscriber)) r.Recorder.Eventf(subscription, corev1.EventTypeWarning, subscriberResolveFailed, "Failed to resolve spec.subscriber: %v", err) + subscription.Status.MarkReferencesNotResolved(subscriberResolveFailed, "Failed to resolve spec.subscriber: %v", err) return err } @@ -203,6 +205,7 @@ func (r *Reconciler) reconcile(ctx context.Context, subscription *v1alpha1.Subsc zap.Error(err), zap.Any("reply", subscription.Spec.Reply)) r.Recorder.Eventf(subscription, corev1.EventTypeWarning, resultResolveFailed, "Failed to resolve spec.reply: %v", err) + subscription.Status.MarkReferencesNotResolved(resultResolveFailed, "Failed to resolve spec.reply: %v", err) return err } @@ -221,6 +224,7 @@ func (r *Reconciler) reconcile(ctx context.Context, subscription *v1alpha1.Subsc if err := r.syncPhysicalChannel(ctx, subscription, false); err != nil { 
logging.FromContext(ctx).Warn("Failed to sync physical Channel", zap.Error(err)) r.Recorder.Eventf(subscription, corev1.EventTypeWarning, physicalChannelSyncFailed, "Failed to sync physical Channel: %v", err) + subscription.Status.MarkChannelNotReady(physicalChannelSyncFailed, "Failed to sync physical Channel: %v", err) return err } // Everything went well, set the fact that subscriptions have been modified From 744b4bd7240be1a18ce5814ddd320edff9a82f70 Mon Sep 17 00:00:00 2001 From: nachocano Date: Wed, 22 May 2019 17:47:50 -0700 Subject: [PATCH 35/64] marking subscription as not ready... --- .../subscription/subscription_test.go | 29 +++++++++++++++++++ pkg/reconciler/testing/subscription.go | 6 ++++ 2 files changed, 35 insertions(+) diff --git a/pkg/reconciler/subscription/subscription_test.go b/pkg/reconciler/subscription/subscription_test.go index e7794af4e67..bb912ed8d10 100644 --- a/pkg/reconciler/subscription/subscription_test.go +++ b/pkg/reconciler/subscription/subscription_test.go @@ -113,6 +113,31 @@ func TestAllCases(t *testing.T) { // WantEvents: []string{ // Eventf(corev1.EventTypeWarning, "ChannelReferenceFetchFailed", "Failed to validate spec.channel exists: s \"\" not found"), // }, + }, { + Name: "subscription, but channel does not exist", + Objects: []runtime.Object{ + NewSubscription(subscriptionName, testNS, + WithSubscriptionUID(subscriptionUID), + WithSubscriptionChannel(channelGVK, channelName), + WithSubscriptionSubscriberRef(subscriberGVK, subscriberName), + ), + NewUnstructured(subscriberGVK, subscriberName, testNS), + }, + Key: testNS + "/" + subscriptionName, + WantErr: true, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "ChannelReferenceFetchFailed", "Failed to validate spec.channel exists: channels.eventing.knative.dev %q not found", channelName), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: NewSubscription(subscriptionName, testNS, + WithSubscriptionUID(subscriptionUID), + 
WithSubscriptionChannel(channelGVK, channelName), + WithSubscriptionSubscriberRef(subscriberGVK, subscriberName), + // The first reconciliation will initialize the status conditions. + WithInitSubscriptionConditions, + WithSubscriptionReferencesNotResolved(channelReferenceFetchFailed, fmt.Sprintf("Failed to validate spec.channel exists: channels.eventing.knative.dev %q not found", channelName)), + ), + }}, }, { Name: "subscription, but subscriber is not addressable", Objects: []runtime.Object{ @@ -139,6 +164,7 @@ func TestAllCases(t *testing.T) { WithSubscriptionSubscriberRef(subscriberGVK, subscriberName), // The first reconciliation will initialize the status conditions. WithInitSubscriptionConditions, + WithSubscriptionReferencesNotResolved(subscriberResolveFailed, "Failed to resolve spec.subscriber: status does not contain address"), ), }}, }, { @@ -166,6 +192,7 @@ func TestAllCases(t *testing.T) { WithSubscriptionSubscriberRef(subscriberGVK, subscriberName), // The first reconciliation will initialize the status conditions. WithInitSubscriptionConditions, + WithSubscriptionReferencesNotResolved(subscriberResolveFailed, fmt.Sprintf("Failed to resolve spec.subscriber: subscribers.eventing.knative.dev %q not found", subscriberName)), ), }}, }, { @@ -198,6 +225,7 @@ func TestAllCases(t *testing.T) { // The first reconciliation will initialize the status conditions. WithInitSubscriptionConditions, WithSubscriptionPhysicalSubscriptionSubscriber(subscriberURI), + WithSubscriptionReferencesNotResolved(resultResolveFailed, fmt.Sprintf("Failed to resolve spec.reply: channels.eventing.knative.dev %q not found", replyName)), ), }}, }, { @@ -472,6 +500,7 @@ func TestAllCases(t *testing.T) { WithSubscriptionSubscriberRef(serviceGVK, serviceName), // The first reconciliation will initialize the status conditions. 
WithInitSubscriptionConditions, + WithSubscriptionReferencesNotResolved(subscriberResolveFailed, fmt.Sprintf("Failed to resolve spec.subscriber: services %q not found", serviceName)), ), }}, }, { diff --git a/pkg/reconciler/testing/subscription.go b/pkg/reconciler/testing/subscription.go index 91d3eb9d687..4857e8dfc44 100644 --- a/pkg/reconciler/testing/subscription.go +++ b/pkg/reconciler/testing/subscription.go @@ -143,6 +143,12 @@ func MarkSubscriptionReady(s *v1alpha1.Subscription) { s.Status.MarkReferencesResolved() } +func WithSubscriptionReferencesNotResolved(reason, msg string) SubscriptionOption { + return func(s *v1alpha1.Subscription) { + s.Status.MarkReferencesNotResolved(reason, msg) + } +} + func WithSubscriptionReply(gvk metav1.GroupVersionKind, name string) SubscriptionOption { return func(s *v1alpha1.Subscription) { s.Spec.Reply = &v1alpha1.ReplyStrategy{ From 036e7143439f64c625cbb2ec2f661b678ccc3b6d Mon Sep 17 00:00:00 2001 From: nachocano Date: Wed, 22 May 2019 18:15:47 -0700 Subject: [PATCH 36/64] rolling back the change of removing the channelInformer --- cmd/controller/main.go | 1 + pkg/reconciler/subscription/subscription.go | 9 +++++++++ pkg/reconciler/subscription/subscription_test.go | 3 ++- 3 files changed, 12 insertions(+), 1 deletion(-) diff --git a/cmd/controller/main.go b/cmd/controller/main.go index 59387fc6484..2444b02d79e 100644 --- a/cmd/controller/main.go +++ b/cmd/controller/main.go @@ -106,6 +106,7 @@ func main() { subscription.NewController( opt, subscriptionInformer, + channelInformer, addressableInformer, ), namespace.NewController( diff --git a/pkg/reconciler/subscription/subscription.go b/pkg/reconciler/subscription/subscription.go index 42707a2e195..5c2a0cd6278 100644 --- a/pkg/reconciler/subscription/subscription.go +++ b/pkg/reconciler/subscription/subscription.go @@ -82,6 +82,7 @@ var _ controller.Reconciler = (*Reconciler)(nil) func NewController( opt reconciler.Options, subscriptionInformer 
eventinginformers.SubscriptionInformer, + channelInformer eventinginformers.ChannelInformer, addressableInformer eventingduck.AddressableInformer, ) *controller.Impl { @@ -98,6 +99,14 @@ func NewController( // Tracker is used to notify us when the resources Subscription depends on change, so that the // Subscription needs to reconcile again. r.tracker = tracker.New(impl.EnqueueKey, opt.GetTrackerLease()) + channelInformer.Informer().AddEventHandler(reconciler.Handler( + // Call the tracker's OnChanged method, but we've seen the objects coming through this path + // missing TypeMeta, so ensure it is properly populated. + controller.EnsureTypeMeta( + r.tracker.OnChanged, + v1alpha1.SchemeGroupVersion.WithKind("Channel"), + ), + )) return impl } diff --git a/pkg/reconciler/subscription/subscription_test.go b/pkg/reconciler/subscription/subscription_test.go index bb912ed8d10..ceec5d908b7 100644 --- a/pkg/reconciler/subscription/subscription_test.go +++ b/pkg/reconciler/subscription/subscription_test.go @@ -656,12 +656,13 @@ func TestNew(t *testing.T) { eventingInformer := informers.NewSharedInformerFactory(eventingClient, 0) subscriptionInformer := eventingInformer.Eventing().V1alpha1().Subscriptions() + channelInformer := eventingInformer.Eventing().V1alpha1().Channels() addressableInformer := &fakeAddressableInformer{} c := NewController(reconciler.Options{ KubeClientSet: kubeClient, EventingClientSet: eventingClient, Logger: logtesting.TestLogger(t), - }, subscriptionInformer, addressableInformer) + }, subscriptionInformer, channelInformer, addressableInformer) if c == nil { t.Fatal("Expected NewController to return a non-nil value") From 61ebc8c9f1d097017eacdcb8485142325f95456c Mon Sep 17 00:00:00 2001 From: nachocano Date: Wed, 22 May 2019 18:17:37 -0700 Subject: [PATCH 37/64] add TODO --- pkg/reconciler/subscription/subscription.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/reconciler/subscription/subscription.go 
b/pkg/reconciler/subscription/subscription.go index 5c2a0cd6278..b9edd369f81 100644 --- a/pkg/reconciler/subscription/subscription.go +++ b/pkg/reconciler/subscription/subscription.go @@ -99,6 +99,7 @@ func NewController( // Tracker is used to notify us when the resources Subscription depends on change, so that the // Subscription needs to reconcile again. r.tracker = tracker.New(impl.EnqueueKey, opt.GetTrackerLease()) + // TODO further analyze if this informer can be removed. channelInformer.Informer().AddEventHandler(reconciler.Handler( // Call the tracker's OnChanged method, but we've seen the objects coming through this path // missing TypeMeta, so ensure it is properly populated. From 6632fbcd613e2683c3a2da511ac8147a3bda1757 Mon Sep 17 00:00:00 2001 From: nachocano Date: Thu, 23 May 2019 09:33:41 -0700 Subject: [PATCH 38/64] moved it to controller package as I'll be creating a dispatcher one --- contrib/kafka/cmd/channel_controller/main.go | 2 +- contrib/kafka/pkg/reconciler/{ => controller}/kafkachannel.go | 4 ++-- .../pkg/reconciler/{ => controller}/kafkachannel_test.go | 2 +- .../kafka/pkg/reconciler/{ => controller}/resources/client.go | 0 .../pkg/reconciler/{ => controller}/resources/service.go | 0 .../pkg/reconciler/{ => controller}/resources/service_test.go | 0 .../kafka/pkg/reconciler/{ => controller}/resources/topic.go | 0 7 files changed, 4 insertions(+), 4 deletions(-) rename contrib/kafka/pkg/reconciler/{ => controller}/kafkachannel.go (99%) rename contrib/kafka/pkg/reconciler/{ => controller}/kafkachannel_test.go (99%) rename contrib/kafka/pkg/reconciler/{ => controller}/resources/client.go (100%) rename contrib/kafka/pkg/reconciler/{ => controller}/resources/service.go (100%) rename contrib/kafka/pkg/reconciler/{ => controller}/resources/service_test.go (100%) rename contrib/kafka/pkg/reconciler/{ => controller}/resources/topic.go (100%) diff --git a/contrib/kafka/cmd/channel_controller/main.go b/contrib/kafka/cmd/channel_controller/main.go index 
d6d867f3894..1f2d23a72bc 100644 --- a/contrib/kafka/cmd/channel_controller/main.go +++ b/contrib/kafka/cmd/channel_controller/main.go @@ -25,7 +25,7 @@ import ( clientset "github.com/knative/eventing/contrib/kafka/pkg/client/clientset/versioned" informers "github.com/knative/eventing/contrib/kafka/pkg/client/informers/externalversions" - kafkachannel "github.com/knative/eventing/contrib/kafka/pkg/reconciler" + kafkachannel "github.com/knative/eventing/contrib/kafka/pkg/reconciler/controller" "github.com/knative/eventing/pkg/logconfig" "github.com/knative/eventing/pkg/logging" "github.com/knative/eventing/pkg/reconciler" diff --git a/contrib/kafka/pkg/reconciler/kafkachannel.go b/contrib/kafka/pkg/reconciler/controller/kafkachannel.go similarity index 99% rename from contrib/kafka/pkg/reconciler/kafkachannel.go rename to contrib/kafka/pkg/reconciler/controller/kafkachannel.go index 877afa2fc1d..7aefc1e61bb 100644 --- a/contrib/kafka/pkg/reconciler/kafkachannel.go +++ b/contrib/kafka/pkg/reconciler/controller/kafkachannel.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package reconciler +package controller import ( "context" @@ -30,7 +30,7 @@ import ( "github.com/knative/eventing/contrib/kafka/pkg/client/clientset/versioned" messaginginformers "github.com/knative/eventing/contrib/kafka/pkg/client/informers/externalversions/messaging/v1alpha1" listers "github.com/knative/eventing/contrib/kafka/pkg/client/listers/messaging/v1alpha1" - "github.com/knative/eventing/contrib/kafka/pkg/reconciler/resources" + "github.com/knative/eventing/contrib/kafka/pkg/reconciler/controller/resources" "github.com/knative/eventing/pkg/logging" "github.com/knative/eventing/pkg/reconciler" "github.com/knative/pkg/controller" diff --git a/contrib/kafka/pkg/reconciler/kafkachannel_test.go b/contrib/kafka/pkg/reconciler/controller/kafkachannel_test.go similarity index 99% rename from contrib/kafka/pkg/reconciler/kafkachannel_test.go rename to contrib/kafka/pkg/reconciler/controller/kafkachannel_test.go index 374a28ac1fa..b40f9a01d6c 100644 --- a/contrib/kafka/pkg/reconciler/kafkachannel_test.go +++ b/contrib/kafka/pkg/reconciler/controller/kafkachannel_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package reconciler +package controller import ( "fmt" diff --git a/contrib/kafka/pkg/reconciler/resources/client.go b/contrib/kafka/pkg/reconciler/controller/resources/client.go similarity index 100% rename from contrib/kafka/pkg/reconciler/resources/client.go rename to contrib/kafka/pkg/reconciler/controller/resources/client.go diff --git a/contrib/kafka/pkg/reconciler/resources/service.go b/contrib/kafka/pkg/reconciler/controller/resources/service.go similarity index 100% rename from contrib/kafka/pkg/reconciler/resources/service.go rename to contrib/kafka/pkg/reconciler/controller/resources/service.go diff --git a/contrib/kafka/pkg/reconciler/resources/service_test.go b/contrib/kafka/pkg/reconciler/controller/resources/service_test.go similarity index 100% rename from contrib/kafka/pkg/reconciler/resources/service_test.go rename to contrib/kafka/pkg/reconciler/controller/resources/service_test.go diff --git a/contrib/kafka/pkg/reconciler/resources/topic.go b/contrib/kafka/pkg/reconciler/controller/resources/topic.go similarity index 100% rename from contrib/kafka/pkg/reconciler/resources/topic.go rename to contrib/kafka/pkg/reconciler/controller/resources/topic.go From e043ae5613df8d3a971db7541a53588d0f8a2503 Mon Sep 17 00:00:00 2001 From: nachocano Date: Thu, 23 May 2019 10:01:32 -0700 Subject: [PATCH 39/64] wip, not compiling --- contrib/kafka/cmd/channel_dispatcher/main.go | 186 +++++--- contrib/kafka/cmd/channel_dispatcher/main1.go | 98 ++++ contrib/kafka/config/400-kafka-config.yaml | 2 +- contrib/kafka/config/kafka.yaml | 2 +- .../pkg/reconciler/dispatcher/kafkachannel.go | 430 ++++++++++++++++++ 5 files changed, 665 insertions(+), 53 deletions(-) create mode 100644 contrib/kafka/cmd/channel_dispatcher/main1.go create mode 100644 contrib/kafka/pkg/reconciler/dispatcher/kafkachannel.go diff --git a/contrib/kafka/cmd/channel_dispatcher/main.go b/contrib/kafka/cmd/channel_dispatcher/main.go index 995710bca9d..f597e4bee47 100644 --- 
a/contrib/kafka/cmd/channel_dispatcher/main.go +++ b/contrib/kafka/cmd/channel_dispatcher/main.go @@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -20,79 +20,163 @@ import ( "flag" "github.com/knative/eventing/contrib/kafka/pkg/utils" "log" - - "github.com/knative/eventing/contrib/kafka/pkg/controller" - "github.com/knative/eventing/contrib/kafka/pkg/dispatcher" - "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" - "github.com/knative/eventing/pkg/channelwatcher" - "github.com/knative/eventing/pkg/tracing" + // Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters). + // _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + + clientset "github.com/knative/eventing/contrib/kafka/pkg/client/clientset/versioned" + informers "github.com/knative/eventing/contrib/kafka/pkg/client/informers/externalversions" + kafkachannel "github.com/knative/eventing/contrib/kafka/pkg/reconciler/controller" + "github.com/knative/eventing/pkg/logconfig" + "github.com/knative/eventing/pkg/logging" + "github.com/knative/eventing/pkg/reconciler" "github.com/knative/pkg/configmap" + kncontroller "github.com/knative/pkg/controller" "github.com/knative/pkg/signals" "github.com/knative/pkg/system" "go.uber.org/zap" - "k8s.io/client-go/kubernetes" - "sigs.k8s.io/controller-runtime/pkg/client/config" - "sigs.k8s.io/controller-runtime/pkg/manager" + kubeinformers "k8s.io/client-go/informers" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" +) + +var ( + hardcodedLoggingConfig = flag.Bool("hardCodedLoggingConfig", false, "If true, use the hard coded logging config. 
It is intended to be used only when debugging outside a Kubernetes cluster.") + masterURL = flag.String("master", "", "The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster.") + kubeconfig = flag.String("kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.") ) -// TODO update the dispatcher in follow up PR. func main() { flag.Parse() - logger, err := zap.NewProduction() - if err != nil { - log.Fatalf("Unable to create logger: %v", err) - } - kafkaConfig, err := utils.GetKafkaConfig("/etc/config-kafka") - if err != nil { - logger.Fatal("Unable to load provisioner config", zap.Error(err)) - } + logger, atomicLevel := setupLogger() + defer logger.Sync() + + // set up signals so we handle the first shutdown signal gracefully + stopCh := signals.SetupSignalHandler() - mgr, err := manager.New(config.GetConfigOrDie(), manager.Options{}) + cfg, err := clientcmd.BuildConfigFromFlags(*masterURL, *kubeconfig) if err != nil { - logger.Fatal("Unable to create manager.", zap.Error(err)) + logger.Fatalw("Error building kubeconfig", zap.Error(err)) } - kafkaDispatcher, err := dispatcher.NewDispatcher(kafkaConfig.Brokers, kafkaConfig.ConsumerMode, logger) + kafkaConfig, err := utils.GetKafkaConfig("/etc/config-kafka") if err != nil { - logger.Fatal("Unable to create kafka dispatcher", zap.Error(err)) - } - if err = mgr.Add(kafkaDispatcher); err != nil { - logger.Fatal("Unable to add the kafka dispatcher", zap.Error(err)) + logger.Fatalw("Error loading kafka config", zap.Error(err)) } - if err := v1alpha1.AddToScheme(mgr.GetScheme()); err != nil { - logger.Fatal("Unable to add scheme for eventing apis.", zap.Error(err)) + logger = logger.With(zap.String("controller/impl", "pkg")) + logger.Info("Starting the Kafka dispatcher") + + systemNS := system.Namespace() + + const numControllers = 1 + cfg.QPS = numControllers * rest.DefaultQPS + cfg.Burst = numControllers * rest.DefaultBurst + opt := 
reconciler.NewOptionsOrDie(cfg, logger, stopCh) + // Setting up our own eventingClientSet as we need the messaging API introduced with kafka. + eventingClientSet := clientset.NewForConfigOrDie(cfg) + + kubeInformerFactory := kubeinformers.NewSharedInformerFactory(opt.KubeClientSet, opt.ResyncPeriod) + eventingInformerFactory := informers.NewSharedInformerFactory(eventingClientSet, opt.ResyncPeriod) + + // Messaging + kafkaChannelInformer := eventingInformerFactory.Messaging().V1alpha1().KafkaChannels() + + // Kube + serviceInformer := kubeInformerFactory.Core().V1().Services() + endpointsInformer := kubeInformerFactory.Core().V1().Endpoints() + deploymentInformer := kubeInformerFactory.Apps().V1().Deployments() + + // Build all of our controllers, with the clients constructed above. + // Add new controllers to this array. + // You also need to modify numControllers above to match this. + controllers := [...]*kncontroller.Impl{ + kafkachannel.NewController( + opt, + eventingClientSet, + kafkaConfig, + systemNS, + dispatcherDeploymentName, + dispatcherServiceName, + kafkaChannelInformer, + deploymentInformer, + serviceInformer, + endpointsInformer, + ), } - - // Zipkin tracing. - kc := kubernetes.NewForConfigOrDie(mgr.GetConfig()) - configMapWatcher := configmap.NewInformedWatcher(kc, system.Namespace()) - if err = tracing.SetupDynamicZipkinPublishing(logger.Sugar(), configMapWatcher, "kafka-ch-dispatcher"); err != nil { - logger.Fatal("Error setting up Zipkin publishing", zap.Error(err)) + // This line asserts at compile time that the length of controllers is equal to numControllers. 
+ // It is based on https://go101.org/article/tips.html#assert-at-compile-time, which notes that + // var _ [N-M]int + // asserts at compile time that N >= M, which we can use to establish equality of N and M: + // (N >= M) && (M >= N) => (N == M) + var _ [numControllers - len(controllers)][len(controllers) - numControllers]int + + // Watch the logging config map and dynamically update logging levels. + opt.ConfigMapWatcher.Watch(logconfig.ConfigMapName(), logging.UpdateLevelFromConfigMap(logger, atomicLevel, logconfig.Controller)) + // TODO: Watch the observability config map and dynamically update metrics exporter. + //opt.ConfigMapWatcher.Watch(metrics.ObservabilityConfigName, metrics.UpdateExporterFromConfigMap(component, logger)) + if err := opt.ConfigMapWatcher.Start(stopCh); err != nil { + logger.Fatalw("failed to start configuration manager", zap.Error(err)) } - if err = channelwatcher.New(mgr, logger, channelwatcher.UpdateConfigWatchHandler(kafkaDispatcher.UpdateConfig, shouldWatch)); err != nil { - logger.Fatal("Unable to create channel watcher.", zap.Error(err)) + // Start all of the informers and wait for them to sync. + logger.Info("Starting informers.") + if err := kncontroller.StartInformers( + stopCh, + // Messaging + kafkaChannelInformer.Informer(), + + // Kube + serviceInformer.Informer(), + deploymentInformer.Informer(), + endpointsInformer.Informer(), + ); err != nil { + logger.Fatalf("Failed to start informers: %v", err) } - // set up signals so we handle the first shutdown signal gracefully - stopCh := signals.SetupSignalHandler() - - // configMapWatcher does not block, so start it first. - if err = configMapWatcher.Start(stopCh); err != nil { - logger.Fatal("Failed to start ConfigMap watcher", zap.Error(err)) - } + logger.Info("Starting controllers.") + kncontroller.StartAll(stopCh, controllers[:]...) +} - // Start blocks forever. - err = mgr.Start(stopCh) +func setupLogger() (*zap.SugaredLogger, zap.AtomicLevel) { + // Set up our logger. 
+ loggingConfigMap := getLoggingConfigOrDie() + loggingConfig, err := logging.NewConfigFromMap(loggingConfigMap) if err != nil { - logger.Fatal("Manager.Start() returned an error", zap.Error(err)) + log.Fatalf("Error parsing logging configuration: %v", err) } - logger.Info("Exiting...") + return logging.NewLoggerFromConfig(loggingConfig, logconfig.Controller) } -func shouldWatch(ch *v1alpha1.Channel) bool { - return ch.Spec.Provisioner != nil && - ch.Spec.Provisioner.Namespace == "" && - ch.Spec.Provisioner.Name == controller.Name +func getLoggingConfigOrDie() map[string]string { + if hardcodedLoggingConfig != nil && *hardcodedLoggingConfig { + return map[string]string{ + "loglevel.controller": "info", + "zap-logger-config": ` + { + "level": "info", + "development": false, + "outputPaths": ["stdout"], + "errorOutputPaths": ["stderr"], + "encoding": "json", + "encoderConfig": { + "timeKey": "ts", + "levelKey": "level", + "nameKey": "logger", + "callerKey": "caller", + "messageKey": "msg", + "stacktraceKey": "stacktrace", + "lineEnding": "", + "levelEncoder": "", + "timeEncoder": "iso8601", + "durationEncoder": "", + "callerEncoder": "" + }`, + } + } else { + cm, err := configmap.Load("/etc/config-logging") + if err != nil { + log.Fatalf("Error loading logging configuration: %v", err) + } + return cm + } } diff --git a/contrib/kafka/cmd/channel_dispatcher/main1.go b/contrib/kafka/cmd/channel_dispatcher/main1.go new file mode 100644 index 00000000000..995710bca9d --- /dev/null +++ b/contrib/kafka/cmd/channel_dispatcher/main1.go @@ -0,0 +1,98 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "flag" + "github.com/knative/eventing/contrib/kafka/pkg/utils" + "log" + + "github.com/knative/eventing/contrib/kafka/pkg/controller" + "github.com/knative/eventing/contrib/kafka/pkg/dispatcher" + "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" + "github.com/knative/eventing/pkg/channelwatcher" + "github.com/knative/eventing/pkg/tracing" + "github.com/knative/pkg/configmap" + "github.com/knative/pkg/signals" + "github.com/knative/pkg/system" + "go.uber.org/zap" + "k8s.io/client-go/kubernetes" + "sigs.k8s.io/controller-runtime/pkg/client/config" + "sigs.k8s.io/controller-runtime/pkg/manager" +) + +// TODO update the dispatcher in follow up PR. 
+func main() { + flag.Parse() + logger, err := zap.NewProduction() + if err != nil { + log.Fatalf("Unable to create logger: %v", err) + } + kafkaConfig, err := utils.GetKafkaConfig("/etc/config-kafka") + if err != nil { + logger.Fatal("Unable to load provisioner config", zap.Error(err)) + } + + mgr, err := manager.New(config.GetConfigOrDie(), manager.Options{}) + if err != nil { + logger.Fatal("Unable to create manager.", zap.Error(err)) + } + + kafkaDispatcher, err := dispatcher.NewDispatcher(kafkaConfig.Brokers, kafkaConfig.ConsumerMode, logger) + if err != nil { + logger.Fatal("Unable to create kafka dispatcher", zap.Error(err)) + } + if err = mgr.Add(kafkaDispatcher); err != nil { + logger.Fatal("Unable to add the kafka dispatcher", zap.Error(err)) + } + + if err := v1alpha1.AddToScheme(mgr.GetScheme()); err != nil { + logger.Fatal("Unable to add scheme for eventing apis.", zap.Error(err)) + } + + // Zipkin tracing. + kc := kubernetes.NewForConfigOrDie(mgr.GetConfig()) + configMapWatcher := configmap.NewInformedWatcher(kc, system.Namespace()) + if err = tracing.SetupDynamicZipkinPublishing(logger.Sugar(), configMapWatcher, "kafka-ch-dispatcher"); err != nil { + logger.Fatal("Error setting up Zipkin publishing", zap.Error(err)) + } + + if err = channelwatcher.New(mgr, logger, channelwatcher.UpdateConfigWatchHandler(kafkaDispatcher.UpdateConfig, shouldWatch)); err != nil { + logger.Fatal("Unable to create channel watcher.", zap.Error(err)) + } + + // set up signals so we handle the first shutdown signal gracefully + stopCh := signals.SetupSignalHandler() + + // configMapWatcher does not block, so start it first. + if err = configMapWatcher.Start(stopCh); err != nil { + logger.Fatal("Failed to start ConfigMap watcher", zap.Error(err)) + } + + // Start blocks forever. 
+ err = mgr.Start(stopCh) + if err != nil { + logger.Fatal("Manager.Start() returned an error", zap.Error(err)) + } + logger.Info("Exiting...") +} + +func shouldWatch(ch *v1alpha1.Channel) bool { + return ch.Spec.Provisioner != nil && + ch.Spec.Provisioner.Namespace == "" && + ch.Spec.Provisioner.Name == controller.Name +} diff --git a/contrib/kafka/config/400-kafka-config.yaml b/contrib/kafka/config/400-kafka-config.yaml index 535ff48a812..2d43a9789b1 100644 --- a/contrib/kafka/config/400-kafka-config.yaml +++ b/contrib/kafka/config/400-kafka-config.yaml @@ -20,7 +20,7 @@ metadata: data: # Broker URL. Replace this with the URLs for your kafka cluster, # which is in the format of my-cluster-kafka-bootstrap.my-kafka-namespace:9092. - bootstrap_servers: REPLACE_WITH_CLUSTER_URL + bootstrap_servers: my-cluster-kafka-bootstrap.kafka:9092 # Consumer mode to dispatch events from different partitions in parallel. # By default(multiplex), partitions are multiplexed with a single go channel. diff --git a/contrib/kafka/config/kafka.yaml b/contrib/kafka/config/kafka.yaml index fcba51067cf..46a57b4346c 100644 --- a/contrib/kafka/config/kafka.yaml +++ b/contrib/kafka/config/kafka.yaml @@ -99,7 +99,7 @@ metadata: data: # Broker URL's for the provisioner. Replace this with the URL's for your kafka cluster, # which is in the format of my-cluster-kafka-bootstrap.my-kafka-namespace:9092. - bootstrap_servers: REPLACE_WITH_CLUSTER_URL + bootstrap_servers: my-cluster-kafka-bootstrap.kafka:9092 # Consumer mode to dispatch events from different partitions in parallel. # By default(multiplex), partitions are multiplexed with a single go channel. 
diff --git a/contrib/kafka/pkg/reconciler/dispatcher/kafkachannel.go b/contrib/kafka/pkg/reconciler/dispatcher/kafkachannel.go new file mode 100644 index 00000000000..2357a0003b2 --- /dev/null +++ b/contrib/kafka/pkg/reconciler/dispatcher/kafkachannel.go @@ -0,0 +1,430 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + "encoding/json" + "fmt" + "github.com/knative/eventing/contrib/kafka/pkg/utils" + "github.com/knative/eventing/pkg/reconciler/names" + "reflect" + "time" + + "github.com/Shopify/sarama" + "github.com/knative/eventing/contrib/kafka/pkg/apis/messaging/v1alpha1" + "github.com/knative/eventing/contrib/kafka/pkg/client/clientset/versioned" + messaginginformers "github.com/knative/eventing/contrib/kafka/pkg/client/informers/externalversions/messaging/v1alpha1" + listers "github.com/knative/eventing/contrib/kafka/pkg/client/listers/messaging/v1alpha1" + "github.com/knative/eventing/contrib/kafka/pkg/reconciler/controller/resources" + "github.com/knative/eventing/pkg/logging" + "github.com/knative/eventing/pkg/reconciler" + "github.com/knative/pkg/controller" + "go.uber.org/zap" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + apierrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" + appsv1informers "k8s.io/client-go/informers/apps/v1" + corev1informers 
"k8s.io/client-go/informers/core/v1" + appsv1listers "k8s.io/client-go/listers/apps/v1" + corev1listers "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/cache" +) + +const ( + // ReconcilerName is the name of the reconciler. + ReconcilerName = "KafkaChannels" + + // controllerAgentName is the string used by this controller to identify + // itself when creating events. + controllerAgentName = "kafka-ch-dispatcher" + + finalizerName = controllerAgentName + + // Name of the corev1.Events emitted from the reconciliation process. + dispatcherReconciled = "DispatcherReconciled" + dispatcherReconcileFailed = "DispatcherReconcileFailed" + dispatcherUpdateStatusFailed = "DispatcherUpdateStatusFailed" +) + +// Reconciler reconciles Kafka Channels. +type Reconciler struct { + *reconciler.Base + + kafkaConfig *utils.KafkaConfig + + // Using a shared kafkaClusterAdmin does not work currently because of an issue with + // Shopify/sarama, see https://github.com/Shopify/sarama/issues/1162. + kafkaClusterAdmin sarama.ClusterAdmin + + eventingClientSet *versioned.Clientset + kafkachannelLister listers.KafkaChannelLister + kafkachannelInformer cache.SharedIndexInformer + impl *controller.Impl +} + +var ( + deploymentGVK = appsv1.SchemeGroupVersion.WithKind("Deployment") + serviceGVK = corev1.SchemeGroupVersion.WithKind("Service") +) + +// Check that our Reconciler implements controller.Reconciler. +var _ controller.Reconciler = (*Reconciler)(nil) + +// Check that our Reconciler implements cache.ResourceEventHandler +var _ cache.ResourceEventHandler = (*Reconciler)(nil) + +// NewController initializes the controller and is called by the generated code. +// Registers event handlers to enqueue events. 
+func NewController( + opt reconciler.Options, + eventingClientSet *versioned.Clientset, + kafkaConfig *utils.KafkaConfig, + kafkachannelInformer messaginginformers.KafkaChannelInformer, +) *controller.Impl { + + r := &Reconciler{ + Base: reconciler.NewBase(opt, controllerAgentName), + kafkaConfig: kafkaConfig, + eventingClientSet: eventingClientSet, + kafkachannelLister: kafkachannelInformer.Lister(), + kafkachannelInformer: kafkachannelInformer.Informer(), + } + r.impl = controller.NewImpl(r, r.Logger, ReconcilerName, reconciler.MustNewStatsReporter(ReconcilerName, r.Logger)) + + r.Logger.Info("Setting up event handlers") + + // Watch for kafka channels. + kafkachannelInformer.Informer().AddEventHandler(reconciler.Handler(r.impl.Enqueue)) + + return r.impl +} + +// cache.ResourceEventHandler implementation. +// These 3 functions just cause a Global Resync of the channels, because any changes here +// should be reflected onto the channels. +func (r *Reconciler) OnAdd(obj interface{}) { + r.impl.GlobalResync(r.kafkachannelInformer) +} + +func (r *Reconciler) OnUpdate(old, new interface{}) { + r.impl.GlobalResync(r.kafkachannelInformer) +} + +func (r *Reconciler) OnDelete(obj interface{}) { + r.impl.GlobalResync(r.kafkachannelInformer) +} + +// Reconcile compares the actual state with the desired, and attempts to +// converge the two. It then updates the Status block of the KafkaChannel resource +// with the current status of the resource. +func (r *Reconciler) Reconcile(ctx context.Context, key string) error { + // Convert the namespace/name string into a distinct namespace and name. + namespace, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + logging.FromContext(ctx).Error("invalid resource key") + return nil + } + + // Get the KafkaChannel resource with this namespace/name. 
+ original, err := r.kafkachannelLister.KafkaChannels(namespace).Get(name) + if apierrs.IsNotFound(err) { + // The resource may no longer exist, in which case we stop processing. + logging.FromContext(ctx).Error("KafkaChannel key in work queue no longer exists") + return nil + } else if err != nil { + return err + } + + // Don't modify the informers copy. + channel := original.DeepCopy() + + // Reconcile this copy of the KafkaChannel and then write back any status updates regardless of + // whether the reconcile error out. + reconcileErr := r.reconcile(ctx, channel) + if reconcileErr != nil { + logging.FromContext(ctx).Error("Error reconciling KafkaChannel", zap.Error(reconcileErr)) + r.Recorder.Eventf(channel, corev1.EventTypeWarning, channelReconcileFailed, "KafkaChannel reconciliation failed: %v", reconcileErr) + } else { + logging.FromContext(ctx).Debug("KafkaChannel reconciled") + r.Recorder.Event(channel, corev1.EventTypeNormal, channelReconciled, "KafkaChannel reconciled") + } + + if _, updateStatusErr := r.updateStatus(ctx, channel); updateStatusErr != nil { + logging.FromContext(ctx).Error("Failed to update GoogleCloudPubSubChannel status", zap.Error(updateStatusErr)) + r.Recorder.Eventf(channel, corev1.EventTypeWarning, channelUpdateStatusFailed, "Failed to update KafkaChannel's status: %v", updateStatusErr) + return updateStatusErr + } + + // Requeue if the resource is not ready + return reconcileErr +} + +func (r *Reconciler) reconcile(ctx context.Context, kc *v1alpha1.KafkaChannel) error { + kc.Status.InitializeConditions() + + logger := logging.FromContext(ctx) + // Verify channel is valid. 
+ if err := kc.Validate(ctx); err != nil { + logger.Error("Invalid kafka channel", zap.String("channel", kc.Name), zap.Error(err)) + return err + } + + kafkaClusterAdmin, err := r.createClient(ctx, kc) + if err != nil { + logger.Error("Unable to build kafka admin client", zap.String("channel", kc.Name), zap.Error(err)) + return err + } + + // See if the channel has been deleted. + if kc.DeletionTimestamp != nil { + if err := r.deleteTopic(ctx, kc, kafkaClusterAdmin); err != nil { + return err + } + removeFinalizer(kc) + _, err := r.eventingClientSet.MessagingV1alpha1().KafkaChannels(kc.Namespace).Update(kc) + return err + } + + // If we are adding the finalizer for the first time, then ensure that finalizer is persisted + // before manipulating Kafka. + if err := r.ensureFinalizer(kc); err != nil { + return err + } + + // We reconcile the status of the Channel by looking at: + // 1. Kafka topic used by the channel. + // 2. Dispatcher Deployment for it's readiness. + // 3. Dispatcher k8s Service for it's existence. + // 4. Dispatcher endpoints to ensure that there's something backing the Service. + // 5. K8s service representing the channel that will use ExternalName to point to the Dispatcher k8s service. 
+ + if err := r.createTopic(ctx, kc, kafkaClusterAdmin); err != nil { + kc.Status.MarkTopicFailed("TopicCreateFailed", "error while creating topic: %s", err) + return err + } + kc.Status.MarkTopicTrue() + + // Get the Dispatcher Deployment and propagate the status to the Channel + d, err := r.deploymentLister.Deployments(r.dispatcherNamespace).Get(r.dispatcherDeploymentName) + if err != nil { + if apierrs.IsNotFound(err) { + kc.Status.MarkDispatcherFailed("DispatcherDeploymentDoesNotExist", "Dispatcher Deployment does not exist") + } else { + logger.Error("Unable to get the dispatcher Deployment", zap.Error(err)) + kc.Status.MarkDispatcherFailed("DispatcherDeploymentGetFailed", "Failed to get dispatcher Deployment") + } + return err + } + kc.Status.PropagateDispatcherStatus(&d.Status) + + // Get the Dispatcher Service and propagate the status to the Channel in case it does not exist. + // We don't do anything with the service because it's status contains nothing useful, so just do + // an existence check. Then below we check the endpoints targeting it. + _, err = r.serviceLister.Services(r.dispatcherNamespace).Get(r.dispatcherServiceName) + if err != nil { + if apierrs.IsNotFound(err) { + kc.Status.MarkServiceFailed("DispatcherServiceDoesNotExist", "Dispatcher Service does not exist") + } else { + logger.Error("Unable to get the dispatcher service", zap.Error(err)) + kc.Status.MarkServiceFailed("DispatcherServiceGetFailed", "Failed to get dispatcher service") + } + return err + } + kc.Status.MarkServiceTrue() + + // Get the Dispatcher Service Endpoints and propagate the status to the Channel + // endpoints has the same name as the service, so not a bug. 
+ e, err := r.endpointsLister.Endpoints(r.dispatcherNamespace).Get(r.dispatcherServiceName) + if err != nil { + if apierrs.IsNotFound(err) { + kc.Status.MarkEndpointsFailed("DispatcherEndpointsDoesNotExist", "Dispatcher Endpoints does not exist") + } else { + logger.Error("Unable to get the dispatcher endpoints", zap.Error(err)) + kc.Status.MarkEndpointsFailed("DispatcherEndpointsGetFailed", "Failed to get dispatcher endpoints") + } + return err + } + + if len(e.Subsets) == 0 { + logger.Error("No endpoints found for Dispatcher service", zap.Error(err)) + kc.Status.MarkEndpointsFailed("DispatcherEndpointsNotReady", "There are no endpoints ready for Dispatcher service") + return fmt.Errorf("there are no endpoints ready for Dispatcher service %s", r.dispatcherServiceName) + } + kc.Status.MarkEndpointsTrue() + + // Reconcile the k8s service representing the actual Channel. It points to the Dispatcher service via ExternalName + svc, err := r.reconcileChannelService(ctx, kc) + if err != nil { + kc.Status.MarkChannelServiceFailed("ChannelServiceFailed", fmt.Sprintf("Channel Service failed: %s", err)) + return err + } + kc.Status.MarkChannelServiceTrue() + kc.Status.SetAddress(names.ServiceHostName(svc.Name, svc.Namespace)) + + // close the connection + err = kafkaClusterAdmin.Close() + if err != nil { + logger.Error("Error closing the connection", zap.Error(err)) + return err + } + + // Ok, so now the Dispatcher Deployment & Service have been created, we're golden since the + // dispatcher watches the Channel and where it needs to dispatch events to. + return nil +} + +func (r *Reconciler) reconcileChannelService(ctx context.Context, channel *v1alpha1.KafkaChannel) (*corev1.Service, error) { + logger := logging.FromContext(ctx) + // Get the Service and propagate the status to the Channel in case it does not exist. + // We don't do anything with the service because it's status contains nothing useful, so just do + // an existence check. 
Then below we check the endpoints targeting it. + // We may change this name later, so we have to ensure we use proper addressable when resolving these. + svc, err := r.serviceLister.Services(channel.Namespace).Get(resources.MakeChannelServiceName(channel.Name)) + if err != nil { + if apierrs.IsNotFound(err) { + svc, err = resources.MakeK8sService(channel, resources.ExternalService(r.dispatcherNamespace, r.dispatcherServiceName)) + if err != nil { + logger.Error("Failed to create the channel service object", zap.Error(err)) + return nil, err + } + svc, err = r.KubeClientSet.CoreV1().Services(channel.Namespace).Create(svc) + if err != nil { + logger.Error("Failed to create the channel service", zap.Error(err)) + return nil, err + } + return svc, nil + } else { + logger.Error("Unable to get the channel service", zap.Error(err)) + } + return nil, err + } + // Check to make sure that the KafkaChannel owns this service and if not, complain. + if !metav1.IsControlledBy(svc, channel) { + return nil, fmt.Errorf("kafkachannel: %s/%s does not own Service: %q", channel.Namespace, channel.Name, svc.Name) + } + return svc, nil +} + +func (r *Reconciler) updateStatus(ctx context.Context, desired *v1alpha1.KafkaChannel) (*v1alpha1.KafkaChannel, error) { + kc, err := r.kafkachannelLister.KafkaChannels(desired.Namespace).Get(desired.Name) + if err != nil { + return nil, err + } + + if reflect.DeepEqual(kc.Status, desired.Status) { + return kc, nil + } + + becomesReady := desired.Status.IsReady() && !kc.Status.IsReady() + + // Don't modify the informers copy. 
+ existing := kc.DeepCopy() + existing.Status = desired.Status + + new, err := r.eventingClientSet.MessagingV1alpha1().KafkaChannels(desired.Namespace).UpdateStatus(existing) + if err == nil && becomesReady { + duration := time.Since(new.ObjectMeta.CreationTimestamp.Time) + r.Logger.Infof("KafkaChannel %q became ready after %v", kc.Name, duration) + if err := r.StatsReporter.ReportReady("Channel", kc.Namespace, kc.Name, duration); err != nil { + r.Logger.Infof("Failed to record ready for KafkaChannel %q: %v", kc.Name, err) + } + } + return new, err +} + +func (r *Reconciler) createClient(ctx context.Context, kc *v1alpha1.KafkaChannel) (sarama.ClusterAdmin, error) { + // We don't currently initialize r.kafkaClusterAdmin, hence we end up creating the cluster admin client every time. + // This is because of an issue with Shopify/sarama. See https://github.com/Shopify/sarama/issues/1162. + // Once the issue is fixed we should use a shared cluster admin client. Also, r.kafkaClusterAdmin is currently + // used to pass a fake admin client in the tests. 
+ kafkaClusterAdmin := r.kafkaClusterAdmin + if kafkaClusterAdmin == nil { + var err error + kafkaClusterAdmin, err = resources.MakeClient(controllerAgentName, r.kafkaConfig.Brokers) + if err != nil { + return nil, err + } + } + return kafkaClusterAdmin, nil +} + +func (r *Reconciler) createTopic(ctx context.Context, channel *v1alpha1.KafkaChannel, kafkaClusterAdmin sarama.ClusterAdmin) error { + logger := logging.FromContext(ctx) + + topicName := resources.MakeTopicName(channel) + logger.Info("Creating topic on Kafka cluster", zap.String("topic", topicName)) + err := kafkaClusterAdmin.CreateTopic(topicName, &sarama.TopicDetail{ + ReplicationFactor: channel.Spec.ReplicationFactor, + NumPartitions: channel.Spec.NumPartitions, + }, false) + if err == sarama.ErrTopicAlreadyExists { + return nil + } else if err != nil { + logger.Error("Error creating topic", zap.String("topic", topicName), zap.Error(err)) + } else { + logger.Info("Successfully created topic", zap.String("topic", topicName)) + } + return err +} + +func (r *Reconciler) deleteTopic(ctx context.Context, channel *v1alpha1.KafkaChannel, kafkaClusterAdmin sarama.ClusterAdmin) error { + logger := logging.FromContext(ctx) + + topicName := resources.MakeTopicName(channel) + logger.Info("Deleting topic on Kafka Cluster", zap.String("topic", topicName)) + err := kafkaClusterAdmin.DeleteTopic(topicName) + if err == sarama.ErrUnknownTopicOrPartition { + return nil + } else if err != nil { + logger.Error("Error deleting topic", zap.String("topic", topicName), zap.Error(err)) + } else { + logger.Info("Successfully deleted topic", zap.String("topic", topicName)) + } + return err +} + +func (r *Reconciler) ensureFinalizer(channel *v1alpha1.KafkaChannel) error { + finalizers := sets.NewString(channel.Finalizers...) 
+ if finalizers.Has(finalizerName) { + return nil + } + + mergePatch := map[string]interface{}{ + "metadata": map[string]interface{}{ + "finalizers": append(channel.Finalizers, finalizerName), + "resourceVersion": channel.ResourceVersion, + }, + } + + patch, err := json.Marshal(mergePatch) + if err != nil { + return err + } + + _, err = r.eventingClientSet.MessagingV1alpha1().KafkaChannels(channel.Namespace).Patch(channel.Name, types.MergePatchType, patch) + return err +} + +func removeFinalizer(channel *v1alpha1.KafkaChannel) { + finalizers := sets.NewString(channel.Finalizers...) + finalizers.Delete(finalizerName) + channel.Finalizers = finalizers.List() +} From 13d24200bd5f48b3f99bbf082b62e904a66d518a Mon Sep 17 00:00:00 2001 From: nachocano Date: Thu, 23 May 2019 10:12:05 -0700 Subject: [PATCH 40/64] removing channelInformer from subscription controller --- cmd/controller/main.go | 1 - pkg/reconciler/subscription/subscription.go | 26 +++++++------------ .../subscription/subscription_test.go | 3 +-- 3 files changed, 11 insertions(+), 19 deletions(-) diff --git a/cmd/controller/main.go b/cmd/controller/main.go index 2444b02d79e..59387fc6484 100644 --- a/cmd/controller/main.go +++ b/cmd/controller/main.go @@ -106,7 +106,6 @@ func main() { subscription.NewController( opt, subscriptionInformer, - channelInformer, addressableInformer, ), namespace.NewController( diff --git a/pkg/reconciler/subscription/subscription.go b/pkg/reconciler/subscription/subscription.go index b9edd369f81..8b2aba55961 100644 --- a/pkg/reconciler/subscription/subscription.go +++ b/pkg/reconciler/subscription/subscription.go @@ -82,7 +82,6 @@ var _ controller.Reconciler = (*Reconciler)(nil) func NewController( opt reconciler.Options, subscriptionInformer eventinginformers.SubscriptionInformer, - channelInformer eventinginformers.ChannelInformer, addressableInformer eventingduck.AddressableInformer, ) *controller.Impl { @@ -99,15 +98,6 @@ func NewController( // Tracker is used to notify us when 
the resources Subscription depends on change, so that the
 	// Subscription needs to reconcile again.
 	r.tracker = tracker.New(impl.EnqueueKey, opt.GetTrackerLease())
 
-	// TODO further analyze if this informer can be removed.
-	channelInformer.Informer().AddEventHandler(reconciler.Handler(
-		// Call the tracker's OnChanged method, but we've seen the objects coming through this path
-		// missing TypeMeta, so ensure it is properly populated.
-		controller.EnsureTypeMeta(
-			r.tracker.OnChanged,
-			v1alpha1.SchemeGroupVersion.WithKind("Channel"),
-		),
-	))
 	return impl
 }
 
@@ -180,6 +170,16 @@ func (r *Reconciler) reconcile(ctx context.Context, subscription *v1alpha1.Subsc
 		return err
 	}
 
+	// Track the channel using the addressableInformer.
+	// We don't need to explicitly set a channelInformer, as this will dynamically generate one for us.
+	// This code needs to be called before checking the existence of the `channel`, in order to make sure the
+	// subscription controller will reconcile upon a `channel` change.
+	track := r.addressableInformer.TrackInNamespace(r.tracker, subscription)
+	if err := track(subscription.Spec.Channel); err != nil {
+		logging.FromContext(ctx).Error("Unable to track changes to spec.channel", zap.Error(err))
+		return err
+	}
+
 	// Verify that `channel` exists.
if _, err := eventingduck.ObjectReference(ctx, r.DynamicClientSet, subscription.Namespace, &subscription.Spec.Channel); err != nil { logging.FromContext(ctx).Warn("Failed to validate Channel exists", @@ -190,12 +190,6 @@ func (r *Reconciler) reconcile(ctx context.Context, subscription *v1alpha1.Subsc return err } - track := r.addressableInformer.TrackInNamespace(r.tracker, subscription) - if err := track(subscription.Spec.Channel); err != nil { - logging.FromContext(ctx).Error("Unable to track changes to spec.channel", zap.Error(err)) - return err - } - subscriberURI, err := eventingduck.SubscriberSpec(ctx, r.DynamicClientSet, subscription.Namespace, subscription.Spec.Subscriber, track) if err != nil { logging.FromContext(ctx).Warn("Failed to resolve Subscriber", diff --git a/pkg/reconciler/subscription/subscription_test.go b/pkg/reconciler/subscription/subscription_test.go index ceec5d908b7..bb912ed8d10 100644 --- a/pkg/reconciler/subscription/subscription_test.go +++ b/pkg/reconciler/subscription/subscription_test.go @@ -656,13 +656,12 @@ func TestNew(t *testing.T) { eventingInformer := informers.NewSharedInformerFactory(eventingClient, 0) subscriptionInformer := eventingInformer.Eventing().V1alpha1().Subscriptions() - channelInformer := eventingInformer.Eventing().V1alpha1().Channels() addressableInformer := &fakeAddressableInformer{} c := NewController(reconciler.Options{ KubeClientSet: kubeClient, EventingClientSet: eventingClient, Logger: logtesting.TestLogger(t), - }, subscriptionInformer, channelInformer, addressableInformer) + }, subscriptionInformer, addressableInformer) if c == nil { t.Fatal("Expected NewController to return a non-nil value") From c486ae0431c5da094eb27ecd3fa1c1d9d7c4bd8a Mon Sep 17 00:00:00 2001 From: nachocano Date: Thu, 23 May 2019 10:44:55 -0700 Subject: [PATCH 41/64] updates --- contrib/kafka/cmd/channel_dispatcher/main.go | 26 +------------------ .../pkg/reconciler/dispatcher/kafkachannel.go | 26 +++---------------- 2 files changed, 
5 insertions(+), 47 deletions(-) diff --git a/contrib/kafka/cmd/channel_dispatcher/main.go b/contrib/kafka/cmd/channel_dispatcher/main.go index f597e4bee47..06edfe55572 100644 --- a/contrib/kafka/cmd/channel_dispatcher/main.go +++ b/contrib/kafka/cmd/channel_dispatcher/main.go @@ -20,21 +20,17 @@ import ( "flag" "github.com/knative/eventing/contrib/kafka/pkg/utils" "log" - // Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters). - // _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" clientset "github.com/knative/eventing/contrib/kafka/pkg/client/clientset/versioned" informers "github.com/knative/eventing/contrib/kafka/pkg/client/informers/externalversions" - kafkachannel "github.com/knative/eventing/contrib/kafka/pkg/reconciler/controller" + kafkachannel "github.com/knative/eventing/contrib/kafka/pkg/reconciler/dispatcher" "github.com/knative/eventing/pkg/logconfig" "github.com/knative/eventing/pkg/logging" "github.com/knative/eventing/pkg/reconciler" "github.com/knative/pkg/configmap" kncontroller "github.com/knative/pkg/controller" "github.com/knative/pkg/signals" - "github.com/knative/pkg/system" "go.uber.org/zap" - kubeinformers "k8s.io/client-go/informers" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" ) @@ -66,26 +62,17 @@ func main() { logger = logger.With(zap.String("controller/impl", "pkg")) logger.Info("Starting the Kafka dispatcher") - systemNS := system.Namespace() - const numControllers = 1 cfg.QPS = numControllers * rest.DefaultQPS cfg.Burst = numControllers * rest.DefaultBurst opt := reconciler.NewOptionsOrDie(cfg, logger, stopCh) // Setting up our own eventingClientSet as we need the messaging API introduced with kafka. 
eventingClientSet := clientset.NewForConfigOrDie(cfg) - - kubeInformerFactory := kubeinformers.NewSharedInformerFactory(opt.KubeClientSet, opt.ResyncPeriod) eventingInformerFactory := informers.NewSharedInformerFactory(eventingClientSet, opt.ResyncPeriod) // Messaging kafkaChannelInformer := eventingInformerFactory.Messaging().V1alpha1().KafkaChannels() - // Kube - serviceInformer := kubeInformerFactory.Core().V1().Services() - endpointsInformer := kubeInformerFactory.Core().V1().Endpoints() - deploymentInformer := kubeInformerFactory.Apps().V1().Deployments() - // Build all of our controllers, with the clients constructed above. // Add new controllers to this array. // You also need to modify numControllers above to match this. @@ -94,13 +81,7 @@ func main() { opt, eventingClientSet, kafkaConfig, - systemNS, - dispatcherDeploymentName, - dispatcherServiceName, kafkaChannelInformer, - deploymentInformer, - serviceInformer, - endpointsInformer, ), } // This line asserts at compile time that the length of controllers is equal to numControllers. @@ -124,11 +105,6 @@ func main() { stopCh, // Messaging kafkaChannelInformer.Informer(), - - // Kube - serviceInformer.Informer(), - deploymentInformer.Informer(), - endpointsInformer.Informer(), ); err != nil { logger.Fatalf("Failed to start informers: %v", err) } diff --git a/contrib/kafka/pkg/reconciler/dispatcher/kafkachannel.go b/contrib/kafka/pkg/reconciler/dispatcher/kafkachannel.go index 2357a0003b2..0617314816a 100644 --- a/contrib/kafka/pkg/reconciler/dispatcher/kafkachannel.go +++ b/contrib/kafka/pkg/reconciler/dispatcher/kafkachannel.go @@ -88,9 +88,6 @@ var ( // Check that our Reconciler implements controller.Reconciler. var _ controller.Reconciler = (*Reconciler)(nil) -// Check that our Reconciler implements cache.ResourceEventHandler -var _ cache.ResourceEventHandler = (*Reconciler)(nil) - // NewController initializes the controller and is called by the generated code. 
// Registers event handlers to enqueue events. func NewController( @@ -117,21 +114,6 @@ func NewController( return r.impl } -// cache.ResourceEventHandler implementation. -// These 3 functions just cause a Global Resync of the channels, because any changes here -// should be reflected onto the channels. -func (r *Reconciler) OnAdd(obj interface{}) { - r.impl.GlobalResync(r.kafkachannelInformer) -} - -func (r *Reconciler) OnUpdate(old, new interface{}) { - r.impl.GlobalResync(r.kafkachannelInformer) -} - -func (r *Reconciler) OnDelete(obj interface{}) { - r.impl.GlobalResync(r.kafkachannelInformer) -} - // Reconcile compares the actual state with the desired, and attempts to // converge the two. It then updates the Status block of the KafkaChannel resource // with the current status of the resource. @@ -161,15 +143,15 @@ func (r *Reconciler) Reconcile(ctx context.Context, key string) error { reconcileErr := r.reconcile(ctx, channel) if reconcileErr != nil { logging.FromContext(ctx).Error("Error reconciling KafkaChannel", zap.Error(reconcileErr)) - r.Recorder.Eventf(channel, corev1.EventTypeWarning, channelReconcileFailed, "KafkaChannel reconciliation failed: %v", reconcileErr) + r.Recorder.Eventf(channel, corev1.EventTypeWarning, dispatcherReconcileFailed, "KafkaChannel reconciliation failed: %v", reconcileErr) } else { logging.FromContext(ctx).Debug("KafkaChannel reconciled") - r.Recorder.Event(channel, corev1.EventTypeNormal, channelReconciled, "KafkaChannel reconciled") + r.Recorder.Event(channel, corev1.EventTypeNormal, dispatcherReconciled, "KafkaChannel reconciled") } if _, updateStatusErr := r.updateStatus(ctx, channel); updateStatusErr != nil { - logging.FromContext(ctx).Error("Failed to update GoogleCloudPubSubChannel status", zap.Error(updateStatusErr)) - r.Recorder.Eventf(channel, corev1.EventTypeWarning, channelUpdateStatusFailed, "Failed to update KafkaChannel's status: %v", updateStatusErr) + logging.FromContext(ctx).Error("Failed to update 
KafkaChannel status", zap.Error(updateStatusErr)) + r.Recorder.Eventf(channel, corev1.EventTypeWarning, dispatcherUpdateStatusFailed, "Failed to update KafkaChannel's status: %v", updateStatusErr) return updateStatusErr } From d4603290c4d60db473d300e92f911cedb2152266 Mon Sep 17 00:00:00 2001 From: nachocano Date: Thu, 23 May 2019 10:48:27 -0700 Subject: [PATCH 42/64] cosmetics --- contrib/kafka/cmd/channel_controller/main.go | 2 -- contrib/kafka/pkg/reconciler/controller/kafkachannel.go | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/contrib/kafka/cmd/channel_controller/main.go b/contrib/kafka/cmd/channel_controller/main.go index 1f2d23a72bc..ac4e168afc6 100644 --- a/contrib/kafka/cmd/channel_controller/main.go +++ b/contrib/kafka/cmd/channel_controller/main.go @@ -20,8 +20,6 @@ import ( "flag" "github.com/knative/eventing/contrib/kafka/pkg/utils" "log" - // Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters). - // _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" clientset "github.com/knative/eventing/contrib/kafka/pkg/client/clientset/versioned" informers "github.com/knative/eventing/contrib/kafka/pkg/client/informers/externalversions" diff --git a/contrib/kafka/pkg/reconciler/controller/kafkachannel.go b/contrib/kafka/pkg/reconciler/controller/kafkachannel.go index 7aefc1e61bb..69b3d0f919a 100644 --- a/contrib/kafka/pkg/reconciler/controller/kafkachannel.go +++ b/contrib/kafka/pkg/reconciler/controller/kafkachannel.go @@ -200,7 +200,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, key string) error { } if _, updateStatusErr := r.updateStatus(ctx, channel); updateStatusErr != nil { - logging.FromContext(ctx).Error("Failed to update GoogleCloudPubSubChannel status", zap.Error(updateStatusErr)) + logging.FromContext(ctx).Error("Failed to update KafkaChannel status", zap.Error(updateStatusErr)) r.Recorder.Eventf(channel, corev1.EventTypeWarning, channelUpdateStatusFailed, "Failed to 
update KafkaChannel's status: %v", updateStatusErr) return updateStatusErr } From 169eeeb07eb778a63e16cfda908145e1c50ee0cb Mon Sep 17 00:00:00 2001 From: nachocano Date: Thu, 23 May 2019 10:50:17 -0700 Subject: [PATCH 43/64] sockpuppetttttttt --- contrib/kafka/cmd/controller/main.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/kafka/cmd/controller/main.go b/contrib/kafka/cmd/controller/main.go index cbf42e3aa30..7a1dd26cf41 100644 --- a/contrib/kafka/cmd/controller/main.go +++ b/contrib/kafka/cmd/controller/main.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Knative Authors +Copyright 2019 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. From 96033d72c4abbffca7c3af52588e0f0052da5982 Mon Sep 17 00:00:00 2001 From: nachocano Date: Thu, 23 May 2019 12:13:43 -0700 Subject: [PATCH 44/64] adding new kafka dispatcher controller instead of the previous one --- contrib/kafka/cmd/channel_dispatcher/main.go | 15 +- contrib/kafka/cmd/channel_dispatcher/main1.go | 98 ----- contrib/kafka/cmd/dispatcher/main.go | 13 +- contrib/kafka/pkg/dispatcher/dispatcher.go | 41 ++- .../pkg/reconciler/controller/kafkachannel.go | 4 +- .../reconciler/controller/resources/topic.go | 30 -- .../pkg/reconciler/dispatcher/kafkachannel.go | 345 ++---------------- contrib/kafka/pkg/utils/util.go | 9 +- 8 files changed, 98 insertions(+), 457 deletions(-) delete mode 100644 contrib/kafka/cmd/channel_dispatcher/main1.go delete mode 100644 contrib/kafka/pkg/reconciler/controller/resources/topic.go diff --git a/contrib/kafka/cmd/channel_dispatcher/main.go b/contrib/kafka/cmd/channel_dispatcher/main.go index 06edfe55572..7a62e4d8bad 100644 --- a/contrib/kafka/cmd/channel_dispatcher/main.go +++ b/contrib/kafka/cmd/channel_dispatcher/main.go @@ -23,6 +23,7 @@ import ( clientset "github.com/knative/eventing/contrib/kafka/pkg/client/clientset/versioned" informers 
"github.com/knative/eventing/contrib/kafka/pkg/client/informers/externalversions" + "github.com/knative/eventing/contrib/kafka/pkg/dispatcher" kafkachannel "github.com/knative/eventing/contrib/kafka/pkg/reconciler/dispatcher" "github.com/knative/eventing/pkg/logconfig" "github.com/knative/eventing/pkg/logging" @@ -59,6 +60,18 @@ func main() { logger.Fatalw("Error loading kafka config", zap.Error(err)) } + args := &dispatcher.KafkaDispatcherArgs{ + ClientID: "kafka-ch-dispatcher", + Brokers: kafkaConfig.Brokers, + ConsumerMode: kafkaConfig.ConsumerMode, + TopicFunc: utils.TopicName, + Logger: logger.Desugar(), + } + kafkaDispatcher, err := dispatcher.NewDispatcher(args) + if err != nil { + logger.Fatalw("Unable to create kafka dispatcher", zap.Error(err)) + } + logger = logger.With(zap.String("controller/impl", "pkg")) logger.Info("Starting the Kafka dispatcher") @@ -80,7 +93,7 @@ func main() { kafkachannel.NewController( opt, eventingClientSet, - kafkaConfig, + kafkaDispatcher, kafkaChannelInformer, ), } diff --git a/contrib/kafka/cmd/channel_dispatcher/main1.go b/contrib/kafka/cmd/channel_dispatcher/main1.go deleted file mode 100644 index 995710bca9d..00000000000 --- a/contrib/kafka/cmd/channel_dispatcher/main1.go +++ /dev/null @@ -1,98 +0,0 @@ -/* -Copyright 2019 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package main - -import ( - "flag" - "github.com/knative/eventing/contrib/kafka/pkg/utils" - "log" - - "github.com/knative/eventing/contrib/kafka/pkg/controller" - "github.com/knative/eventing/contrib/kafka/pkg/dispatcher" - "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" - "github.com/knative/eventing/pkg/channelwatcher" - "github.com/knative/eventing/pkg/tracing" - "github.com/knative/pkg/configmap" - "github.com/knative/pkg/signals" - "github.com/knative/pkg/system" - "go.uber.org/zap" - "k8s.io/client-go/kubernetes" - "sigs.k8s.io/controller-runtime/pkg/client/config" - "sigs.k8s.io/controller-runtime/pkg/manager" -) - -// TODO update the dispatcher in follow up PR. -func main() { - flag.Parse() - logger, err := zap.NewProduction() - if err != nil { - log.Fatalf("Unable to create logger: %v", err) - } - kafkaConfig, err := utils.GetKafkaConfig("/etc/config-kafka") - if err != nil { - logger.Fatal("Unable to load provisioner config", zap.Error(err)) - } - - mgr, err := manager.New(config.GetConfigOrDie(), manager.Options{}) - if err != nil { - logger.Fatal("Unable to create manager.", zap.Error(err)) - } - - kafkaDispatcher, err := dispatcher.NewDispatcher(kafkaConfig.Brokers, kafkaConfig.ConsumerMode, logger) - if err != nil { - logger.Fatal("Unable to create kafka dispatcher", zap.Error(err)) - } - if err = mgr.Add(kafkaDispatcher); err != nil { - logger.Fatal("Unable to add the kafka dispatcher", zap.Error(err)) - } - - if err := v1alpha1.AddToScheme(mgr.GetScheme()); err != nil { - logger.Fatal("Unable to add scheme for eventing apis.", zap.Error(err)) - } - - // Zipkin tracing. 
- kc := kubernetes.NewForConfigOrDie(mgr.GetConfig()) - configMapWatcher := configmap.NewInformedWatcher(kc, system.Namespace()) - if err = tracing.SetupDynamicZipkinPublishing(logger.Sugar(), configMapWatcher, "kafka-ch-dispatcher"); err != nil { - logger.Fatal("Error setting up Zipkin publishing", zap.Error(err)) - } - - if err = channelwatcher.New(mgr, logger, channelwatcher.UpdateConfigWatchHandler(kafkaDispatcher.UpdateConfig, shouldWatch)); err != nil { - logger.Fatal("Unable to create channel watcher.", zap.Error(err)) - } - - // set up signals so we handle the first shutdown signal gracefully - stopCh := signals.SetupSignalHandler() - - // configMapWatcher does not block, so start it first. - if err = configMapWatcher.Start(stopCh); err != nil { - logger.Fatal("Failed to start ConfigMap watcher", zap.Error(err)) - } - - // Start blocks forever. - err = mgr.Start(stopCh) - if err != nil { - logger.Fatal("Manager.Start() returned an error", zap.Error(err)) - } - logger.Info("Exiting...") -} - -func shouldWatch(ch *v1alpha1.Channel) bool { - return ch.Spec.Provisioner != nil && - ch.Spec.Provisioner.Namespace == "" && - ch.Spec.Provisioner.Name == controller.Name -} diff --git a/contrib/kafka/cmd/dispatcher/main.go b/contrib/kafka/cmd/dispatcher/main.go index da004665bb7..d1880527276 100644 --- a/contrib/kafka/cmd/dispatcher/main.go +++ b/contrib/kafka/cmd/dispatcher/main.go @@ -18,13 +18,15 @@ package main import ( "flag" - "github.com/knative/eventing/contrib/kafka/pkg/utils" + "fmt" "log" "github.com/knative/eventing/contrib/kafka/pkg/controller" "github.com/knative/eventing/contrib/kafka/pkg/dispatcher" + "github.com/knative/eventing/contrib/kafka/pkg/utils" "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" "github.com/knative/eventing/pkg/channelwatcher" + topicUtils "github.com/knative/eventing/pkg/provisioners/utils" "github.com/knative/eventing/pkg/tracing" "github.com/knative/pkg/configmap" "github.com/knative/pkg/signals" @@ -51,7 +53,14 @@ 
func main() { logger.Fatal("unable to create manager.", zap.Error(err)) } - kafkaDispatcher, err := dispatcher.NewDispatcher(provisionerConfig.Brokers, provisionerConfig.ConsumerMode, logger) + args := &dispatcher.KafkaDispatcherArgs{ + ClientID: fmt.Sprintf("%s-dispatcher", controller.Name), + Brokers: provisionerConfig.Brokers, + ConsumerMode: provisionerConfig.ConsumerMode, + TopicFunc: topicUtils.TopicName, + Logger: logger, + } + kafkaDispatcher, err := dispatcher.NewDispatcher(args) if err != nil { logger.Fatal("unable to create kafka dispatcher.", zap.Error(err)) } diff --git a/contrib/kafka/pkg/dispatcher/dispatcher.go b/contrib/kafka/pkg/dispatcher/dispatcher.go index 47cde09d3fe..d896592e0f2 100644 --- a/contrib/kafka/pkg/dispatcher/dispatcher.go +++ b/contrib/kafka/pkg/dispatcher/dispatcher.go @@ -23,7 +23,7 @@ import ( "sync/atomic" "github.com/Shopify/sarama" - cluster "github.com/bsm/sarama-cluster" + "github.com/bsm/sarama-cluster" "github.com/google/go-cmp/cmp" "go.uber.org/zap" @@ -31,7 +31,6 @@ import ( eventingduck "github.com/knative/eventing/pkg/apis/duck/v1alpha1" "github.com/knative/eventing/pkg/provisioners" "github.com/knative/eventing/pkg/provisioners/multichannelfanout" - topicUtils "github.com/knative/eventing/pkg/provisioners/utils" ) type KafkaDispatcher struct { @@ -47,7 +46,18 @@ type KafkaDispatcher struct { kafkaConsumers map[provisioners.ChannelReference]map[subscription]KafkaConsumer kafkaCluster KafkaCluster - logger *zap.Logger + topicFunc TopicFunc + logger *zap.Logger +} + +type TopicFunc func(separator, namespace, name string) string + +type KafkaDispatcherArgs struct { + ClientID string + Brokers []string + ConsumerMode cluster.ConsumerMode + TopicFunc TopicFunc + Logger *zap.Logger } type KafkaConsumer interface { @@ -199,8 +209,8 @@ func (d *KafkaDispatcher) Start(stopCh <-chan struct{}) error { func (d *KafkaDispatcher) subscribe(channelRef provisioners.ChannelReference, sub subscription) error { 
d.logger.Info("Subscribing", zap.Any("channelRef", channelRef), zap.Any("subscription", sub)) - topicName := topicUtils.TopicName(utils.KafkaChannelSeparator, channelRef.Namespace, channelRef.Name) - + topicName := d.topicFunc(utils.KafkaChannelSeparator, channelRef.Namespace, channelRef.Name) + // TODO check whether group can be the same for provisioner and CRD impl? group := fmt.Sprintf("%s.%s", controller.Name, sub.UID) consumer, err := d.kafkaCluster.NewConsumer(group, []string{topicName}) @@ -301,11 +311,11 @@ func (d *KafkaDispatcher) setHostToChannelMap(hcMap map[string]provisioners.Chan d.hostToChannelMap.Store(hcMap) } -func NewDispatcher(brokers []string, consumerMode cluster.ConsumerMode, logger *zap.Logger) (*KafkaDispatcher, error) { +func NewDispatcher(args *KafkaDispatcherArgs) (*KafkaDispatcher, error) { conf := sarama.NewConfig() conf.Version = sarama.V1_1_0_0 - conf.ClientID = controller.Name + "-dispatcher" - client, err := sarama.NewClient(brokers, conf) + conf.ClientID = args.ClientID + client, err := sarama.NewClient(args.Brokers, conf) if err != nil { return nil, fmt.Errorf("unable to create kafka client: %v", err) } @@ -316,20 +326,20 @@ func NewDispatcher(brokers []string, consumerMode cluster.ConsumerMode, logger * } dispatcher := &KafkaDispatcher{ - dispatcher: provisioners.NewMessageDispatcher(logger.Sugar()), + dispatcher: provisioners.NewMessageDispatcher(args.Logger.Sugar()), - kafkaCluster: &saramaCluster{kafkaBrokers: brokers, consumerMode: consumerMode}, + kafkaCluster: &saramaCluster{kafkaBrokers: args.Brokers, consumerMode: args.ConsumerMode}, kafkaConsumers: make(map[provisioners.ChannelReference]map[subscription]KafkaConsumer), kafkaAsyncProducer: producer, - logger: logger, + logger: args.Logger, } receiverFunc, err := provisioners.NewMessageReceiver( func(channel provisioners.ChannelReference, message *provisioners.Message) error { - dispatcher.kafkaAsyncProducer.Input() <- toKafkaMessage(channel, message) + 
dispatcher.kafkaAsyncProducer.Input() <- toKafkaMessage(channel, message, args.TopicFunc) return nil }, - logger.Sugar(), + args.Logger.Sugar(), provisioners.ResolveChannelFromHostHeader(provisioners.ResolveChannelFromHostFunc(dispatcher.getChannelReferenceFromHost))) if err != nil { return nil, err @@ -337,6 +347,7 @@ func NewDispatcher(brokers []string, consumerMode cluster.ConsumerMode, logger * dispatcher.receiver = receiverFunc dispatcher.setConfig(&multichannelfanout.Config{}) dispatcher.setHostToChannelMap(map[string]provisioners.ChannelReference{}) + dispatcher.topicFunc = args.TopicFunc return dispatcher, nil } @@ -361,9 +372,9 @@ func fromKafkaMessage(kafkaMessage *sarama.ConsumerMessage) *provisioners.Messag return &message } -func toKafkaMessage(channel provisioners.ChannelReference, message *provisioners.Message) *sarama.ProducerMessage { +func toKafkaMessage(channel provisioners.ChannelReference, message *provisioners.Message, topicFunc TopicFunc) *sarama.ProducerMessage { kafkaMessage := sarama.ProducerMessage{ - Topic: topicUtils.TopicName(utils.KafkaChannelSeparator, channel.Namespace, channel.Name), + Topic: topicFunc(utils.KafkaChannelSeparator, channel.Namespace, channel.Name), Value: sarama.ByteEncoder(message.Payload), } for h, v := range message.Headers { diff --git a/contrib/kafka/pkg/reconciler/controller/kafkachannel.go b/contrib/kafka/pkg/reconciler/controller/kafkachannel.go index 69b3d0f919a..2ef45bdd57e 100644 --- a/contrib/kafka/pkg/reconciler/controller/kafkachannel.go +++ b/contrib/kafka/pkg/reconciler/controller/kafkachannel.go @@ -401,7 +401,7 @@ func (r *Reconciler) createClient(ctx context.Context, kc *v1alpha1.KafkaChannel func (r *Reconciler) createTopic(ctx context.Context, channel *v1alpha1.KafkaChannel, kafkaClusterAdmin sarama.ClusterAdmin) error { logger := logging.FromContext(ctx) - topicName := resources.MakeTopicName(channel) + topicName := utils.TopicName(utils.KafkaChannelSeparator, channel.Namespace, channel.Name) 
logger.Info("Creating topic on Kafka cluster", zap.String("topic", topicName)) err := kafkaClusterAdmin.CreateTopic(topicName, &sarama.TopicDetail{ ReplicationFactor: channel.Spec.ReplicationFactor, @@ -420,7 +420,7 @@ func (r *Reconciler) createTopic(ctx context.Context, channel *v1alpha1.KafkaCha func (r *Reconciler) deleteTopic(ctx context.Context, channel *v1alpha1.KafkaChannel, kafkaClusterAdmin sarama.ClusterAdmin) error { logger := logging.FromContext(ctx) - topicName := resources.MakeTopicName(channel) + topicName := utils.TopicName(utils.KafkaChannelSeparator, channel.Namespace, channel.Name) logger.Info("Deleting topic on Kafka Cluster", zap.String("topic", topicName)) err := kafkaClusterAdmin.DeleteTopic(topicName) if err == sarama.ErrUnknownTopicOrPartition { diff --git a/contrib/kafka/pkg/reconciler/controller/resources/topic.go b/contrib/kafka/pkg/reconciler/controller/resources/topic.go deleted file mode 100644 index 81d756c2d74..00000000000 --- a/contrib/kafka/pkg/reconciler/controller/resources/topic.go +++ /dev/null @@ -1,30 +0,0 @@ -/* -Copyright 2019 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package resources - -import ( - "fmt" - "github.com/knative/eventing/contrib/kafka/pkg/apis/messaging/v1alpha1" -) - -const ( - knativeKafkaTopicPrefix = "knative-messaging-kafka" -) - -func MakeTopicName(channel *v1alpha1.KafkaChannel) string { - return fmt.Sprintf("%s.%s.%s", knativeKafkaTopicPrefix, channel.Namespace, channel.Name) -} diff --git a/contrib/kafka/pkg/reconciler/dispatcher/kafkachannel.go b/contrib/kafka/pkg/reconciler/dispatcher/kafkachannel.go index 0617314816a..3f746aa6c8d 100644 --- a/contrib/kafka/pkg/reconciler/dispatcher/kafkachannel.go +++ b/contrib/kafka/pkg/reconciler/dispatcher/kafkachannel.go @@ -18,33 +18,17 @@ package controller import ( "context" - "encoding/json" - "fmt" - "github.com/knative/eventing/contrib/kafka/pkg/utils" - "github.com/knative/eventing/pkg/reconciler/names" - "reflect" - "time" - - "github.com/Shopify/sarama" "github.com/knative/eventing/contrib/kafka/pkg/apis/messaging/v1alpha1" "github.com/knative/eventing/contrib/kafka/pkg/client/clientset/versioned" messaginginformers "github.com/knative/eventing/contrib/kafka/pkg/client/informers/externalversions/messaging/v1alpha1" listers "github.com/knative/eventing/contrib/kafka/pkg/client/listers/messaging/v1alpha1" - "github.com/knative/eventing/contrib/kafka/pkg/reconciler/controller/resources" + "github.com/knative/eventing/contrib/kafka/pkg/dispatcher" "github.com/knative/eventing/pkg/logging" + "github.com/knative/eventing/pkg/provisioners/fanout" + "github.com/knative/eventing/pkg/provisioners/multichannelfanout" "github.com/knative/eventing/pkg/reconciler" "github.com/knative/pkg/controller" - "go.uber.org/zap" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - apierrs "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/sets" - appsv1informers "k8s.io/client-go/informers/apps/v1" - corev1informers "k8s.io/client-go/informers/core/v1" - 
appsv1listers "k8s.io/client-go/listers/apps/v1" - corev1listers "k8s.io/client-go/listers/core/v1" + "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" ) @@ -55,24 +39,13 @@ const ( // controllerAgentName is the string used by this controller to identify // itself when creating events. controllerAgentName = "kafka-ch-dispatcher" - - finalizerName = controllerAgentName - - // Name of the corev1.Events emitted from the reconciliation process. - dispatcherReconciled = "DispatcherReconciled" - dispatcherReconcileFailed = "DispatcherReconcileFailed" - dispatcherUpdateStatusFailed = "DispatcherUpdateStatusFailed" ) // Reconciler reconciles Kafka Channels. type Reconciler struct { *reconciler.Base - kafkaConfig *utils.KafkaConfig - - // Using a shared kafkaClusterAdmin does not work currently because of an issue with - // Shopify/sarama, see https://github.com/Shopify/sarama/issues/1162. - kafkaClusterAdmin sarama.ClusterAdmin + kafkaDispatcher *dispatcher.KafkaDispatcher eventingClientSet *versioned.Clientset kafkachannelLister listers.KafkaChannelLister @@ -80,11 +53,6 @@ type Reconciler struct { impl *controller.Impl } -var ( - deploymentGVK = appsv1.SchemeGroupVersion.WithKind("Deployment") - serviceGVK = corev1.SchemeGroupVersion.WithKind("Service") -) - // Check that our Reconciler implements controller.Reconciler. 
var _ controller.Reconciler = (*Reconciler)(nil) @@ -93,13 +61,13 @@ var _ controller.Reconciler = (*Reconciler)(nil) func NewController( opt reconciler.Options, eventingClientSet *versioned.Clientset, - kafkaConfig *utils.KafkaConfig, + kafkaDispatcher *dispatcher.KafkaDispatcher, kafkachannelInformer messaginginformers.KafkaChannelInformer, ) *controller.Impl { r := &Reconciler{ Base: reconciler.NewBase(opt, controllerAgentName), - kafkaConfig: kafkaConfig, + kafkaDispatcher: kafkaDispatcher, eventingClientSet: eventingClientSet, kafkachannelLister: kafkachannelInformer.Lister(), kafkachannelInformer: kafkachannelInformer.Informer(), @@ -114,299 +82,60 @@ func NewController( return r.impl } -// Reconcile compares the actual state with the desired, and attempts to -// converge the two. It then updates the Status block of the KafkaChannel resource -// with the current status of the resource. func (r *Reconciler) Reconcile(ctx context.Context, key string) error { // Convert the namespace/name string into a distinct namespace and name. - namespace, name, err := cache.SplitMetaNamespaceKey(key) + _, _, err := cache.SplitMetaNamespaceKey(key) if err != nil { logging.FromContext(ctx).Error("invalid resource key") return nil } - // Get the KafkaChannel resource with this namespace/name. - original, err := r.kafkachannelLister.KafkaChannels(namespace).Get(name) - if apierrs.IsNotFound(err) { - // The resource may no longer exist, in which case we stop processing. - logging.FromContext(ctx).Error("KafkaChannel key in work queue no longer exists") - return nil - } else if err != nil { - return err - } - - // Don't modify the informers copy. - channel := original.DeepCopy() - - // Reconcile this copy of the KafkaChannel and then write back any status updates regardless of - // whether the reconcile error out. 
- reconcileErr := r.reconcile(ctx, channel) - if reconcileErr != nil { - logging.FromContext(ctx).Error("Error reconciling KafkaChannel", zap.Error(reconcileErr)) - r.Recorder.Eventf(channel, corev1.EventTypeWarning, dispatcherReconcileFailed, "KafkaChannel reconciliation failed: %v", reconcileErr) - } else { - logging.FromContext(ctx).Debug("KafkaChannel reconciled") - r.Recorder.Event(channel, corev1.EventTypeNormal, dispatcherReconciled, "KafkaChannel reconciled") - } + // This is a special Reconciler that does the following: + // 1. Lists the kafka channels. + // 2. Creates a multi-channel-fanout-config. + // 3. Calls the kafka dispatcher's updateConfig func with the new multi-channel-fanout-config. - if _, updateStatusErr := r.updateStatus(ctx, channel); updateStatusErr != nil { - logging.FromContext(ctx).Error("Failed to update KafkaChannel status", zap.Error(updateStatusErr)) - r.Recorder.Eventf(channel, corev1.EventTypeWarning, dispatcherUpdateStatusFailed, "Failed to update KafkaChannel's status: %v", updateStatusErr) - return updateStatusErr - } - - // Requeue if the resource is not ready - return reconcileErr -} - -func (r *Reconciler) reconcile(ctx context.Context, kc *v1alpha1.KafkaChannel) error { - kc.Status.InitializeConditions() - - logger := logging.FromContext(ctx) - // Verify channel is valid. - if err := kc.Validate(ctx); err != nil { - logger.Error("Invalid kafka channel", zap.String("channel", kc.Name), zap.Error(err)) - return err - } - - kafkaClusterAdmin, err := r.createClient(ctx, kc) + channels, err := r.kafkachannelLister.List(labels.Everything()) if err != nil { - logger.Error("Unable to build kafka admin client", zap.String("channel", kc.Name), zap.Error(err)) + logging.FromContext(ctx).Error("Error listing kafka channels") return err } - // See if the channel has been deleted. 
- if kc.DeletionTimestamp != nil { - if err := r.deleteTopic(ctx, kc, kafkaClusterAdmin); err != nil { - return err + kafkaChannels := make([]*v1alpha1.KafkaChannel, 0) + for _, channel := range channels { + if channel.Status.IsReady() { + kafkaChannels = append(kafkaChannels, channel) } - removeFinalizer(kc) - _, err := r.eventingClientSet.MessagingV1alpha1().KafkaChannels(kc.Namespace).Update(kc) - return err - } - - // If we are adding the finalizer for the first time, then ensure that finalizer is persisted - // before manipulating Kafka. - if err := r.ensureFinalizer(kc); err != nil { - return err - } - - // We reconcile the status of the Channel by looking at: - // 1. Kafka topic used by the channel. - // 2. Dispatcher Deployment for it's readiness. - // 3. Dispatcher k8s Service for it's existence. - // 4. Dispatcher endpoints to ensure that there's something backing the Service. - // 5. K8s service representing the channel that will use ExternalName to point to the Dispatcher k8s service. - - if err := r.createTopic(ctx, kc, kafkaClusterAdmin); err != nil { - kc.Status.MarkTopicFailed("TopicCreateFailed", "error while creating topic: %s", err) - return err - } - kc.Status.MarkTopicTrue() - - // Get the Dispatcher Deployment and propagate the status to the Channel - d, err := r.deploymentLister.Deployments(r.dispatcherNamespace).Get(r.dispatcherDeploymentName) - if err != nil { - if apierrs.IsNotFound(err) { - kc.Status.MarkDispatcherFailed("DispatcherDeploymentDoesNotExist", "Dispatcher Deployment does not exist") - } else { - logger.Error("Unable to get the dispatcher Deployment", zap.Error(err)) - kc.Status.MarkDispatcherFailed("DispatcherDeploymentGetFailed", "Failed to get dispatcher Deployment") - } - return err - } - kc.Status.PropagateDispatcherStatus(&d.Status) - - // Get the Dispatcher Service and propagate the status to the Channel in case it does not exist. 
- // We don't do anything with the service because it's status contains nothing useful, so just do - // an existence check. Then below we check the endpoints targeting it. - _, err = r.serviceLister.Services(r.dispatcherNamespace).Get(r.dispatcherServiceName) - if err != nil { - if apierrs.IsNotFound(err) { - kc.Status.MarkServiceFailed("DispatcherServiceDoesNotExist", "Dispatcher Service does not exist") - } else { - logger.Error("Unable to get the dispatcher service", zap.Error(err)) - kc.Status.MarkServiceFailed("DispatcherServiceGetFailed", "Failed to get dispatcher service") - } - return err - } - kc.Status.MarkServiceTrue() - - // Get the Dispatcher Service Endpoints and propagate the status to the Channel - // endpoints has the same name as the service, so not a bug. - e, err := r.endpointsLister.Endpoints(r.dispatcherNamespace).Get(r.dispatcherServiceName) - if err != nil { - if apierrs.IsNotFound(err) { - kc.Status.MarkEndpointsFailed("DispatcherEndpointsDoesNotExist", "Dispatcher Endpoints does not exist") - } else { - logger.Error("Unable to get the dispatcher endpoints", zap.Error(err)) - kc.Status.MarkEndpointsFailed("DispatcherEndpointsGetFailed", "Failed to get dispatcher endpoints") - } - return err - } - - if len(e.Subsets) == 0 { - logger.Error("No endpoints found for Dispatcher service", zap.Error(err)) - kc.Status.MarkEndpointsFailed("DispatcherEndpointsNotReady", "There are no endpoints ready for Dispatcher service") - return fmt.Errorf("there are no endpoints ready for Dispatcher service %s", r.dispatcherServiceName) } - kc.Status.MarkEndpointsTrue() - // Reconcile the k8s service representing the actual Channel. 
It points to the Dispatcher service via ExternalName - svc, err := r.reconcileChannelService(ctx, kc) + config := r.newConfigFromKafkaChannels(kafkaChannels) + err = r.kafkaDispatcher.UpdateConfig(config) if err != nil { - kc.Status.MarkChannelServiceFailed("ChannelServiceFailed", fmt.Sprintf("Channel Service failed: %s", err)) + logging.FromContext(ctx).Error("Error updating kafka dispatcher config") return err } - kc.Status.MarkChannelServiceTrue() - kc.Status.SetAddress(names.ServiceHostName(svc.Name, svc.Namespace)) - // close the connection - err = kafkaClusterAdmin.Close() - if err != nil { - logger.Error("Error closing the connection", zap.Error(err)) - return err - } - - // Ok, so now the Dispatcher Deployment & Service have been created, we're golden since the - // dispatcher watches the Channel and where it needs to dispatch events to. return nil } -func (r *Reconciler) reconcileChannelService(ctx context.Context, channel *v1alpha1.KafkaChannel) (*corev1.Service, error) { - logger := logging.FromContext(ctx) - // Get the Service and propagate the status to the Channel in case it does not exist. - // We don't do anything with the service because it's status contains nothing useful, so just do - // an existence check. Then below we check the endpoints targeting it. - // We may change this name later, so we have to ensure we use proper addressable when resolving these. 
- svc, err := r.serviceLister.Services(channel.Namespace).Get(resources.MakeChannelServiceName(channel.Name)) - if err != nil { - if apierrs.IsNotFound(err) { - svc, err = resources.MakeK8sService(channel, resources.ExternalService(r.dispatcherNamespace, r.dispatcherServiceName)) - if err != nil { - logger.Error("Failed to create the channel service object", zap.Error(err)) - return nil, err - } - svc, err = r.KubeClientSet.CoreV1().Services(channel.Namespace).Create(svc) - if err != nil { - logger.Error("Failed to create the channel service", zap.Error(err)) - return nil, err - } - return svc, nil - } else { - logger.Error("Unable to get the channel service", zap.Error(err)) +// newConfigFromKafkaChannels creates a new Config from the list of kafka channels. +func (r *Reconciler) newConfigFromKafkaChannels(channels []*v1alpha1.KafkaChannel) *multichannelfanout.Config { + cc := make([]multichannelfanout.ChannelConfig, 0) + for _, c := range channels { + channelConfig := multichannelfanout.ChannelConfig{ + Namespace: c.Namespace, + Name: c.Name, + HostName: c.Status.Address.Hostname, } - return nil, err - } - // Check to make sure that the KafkaChannel owns this service and if not, complain. - if !metav1.IsControlledBy(svc, channel) { - return nil, fmt.Errorf("kafkachannel: %s/%s does not own Service: %q", channel.Namespace, channel.Name, svc.Name) - } - return svc, nil -} - -func (r *Reconciler) updateStatus(ctx context.Context, desired *v1alpha1.KafkaChannel) (*v1alpha1.KafkaChannel, error) { - kc, err := r.kafkachannelLister.KafkaChannels(desired.Namespace).Get(desired.Name) - if err != nil { - return nil, err - } - - if reflect.DeepEqual(kc.Status, desired.Status) { - return kc, nil - } - - becomesReady := desired.Status.IsReady() && !kc.Status.IsReady() - - // Don't modify the informers copy. 
- existing := kc.DeepCopy() - existing.Status = desired.Status - - new, err := r.eventingClientSet.MessagingV1alpha1().KafkaChannels(desired.Namespace).UpdateStatus(existing) - if err == nil && becomesReady { - duration := time.Since(new.ObjectMeta.CreationTimestamp.Time) - r.Logger.Infof("KafkaChannel %q became ready after %v", kc.Name, duration) - if err := r.StatsReporter.ReportReady("Channel", kc.Namespace, kc.Name, duration); err != nil { - r.Logger.Infof("Failed to record ready for KafkaChannel %q: %v", kc.Name, err) + if c.Spec.Subscribable != nil { + channelConfig.FanoutConfig = fanout.Config{ + AsyncHandler: true, + Subscriptions: c.Spec.Subscribable.Subscribers, + } } + cc = append(cc, channelConfig) } - return new, err -} - -func (r *Reconciler) createClient(ctx context.Context, kc *v1alpha1.KafkaChannel) (sarama.ClusterAdmin, error) { - // We don't currently initialize r.kafkaClusterAdmin, hence we end up creating the cluster admin client every time. - // This is because of an issue with Shopify/sarama. See https://github.com/Shopify/sarama/issues/1162. - // Once the issue is fixed we should use a shared cluster admin client. Also, r.kafkaClusterAdmin is currently - // used to pass a fake admin client in the tests. 
- kafkaClusterAdmin := r.kafkaClusterAdmin - if kafkaClusterAdmin == nil { - var err error - kafkaClusterAdmin, err = resources.MakeClient(controllerAgentName, r.kafkaConfig.Brokers) - if err != nil { - return nil, err - } + return &multichannelfanout.Config{ + ChannelConfigs: cc, } - return kafkaClusterAdmin, nil -} - -func (r *Reconciler) createTopic(ctx context.Context, channel *v1alpha1.KafkaChannel, kafkaClusterAdmin sarama.ClusterAdmin) error { - logger := logging.FromContext(ctx) - - topicName := resources.MakeTopicName(channel) - logger.Info("Creating topic on Kafka cluster", zap.String("topic", topicName)) - err := kafkaClusterAdmin.CreateTopic(topicName, &sarama.TopicDetail{ - ReplicationFactor: channel.Spec.ReplicationFactor, - NumPartitions: channel.Spec.NumPartitions, - }, false) - if err == sarama.ErrTopicAlreadyExists { - return nil - } else if err != nil { - logger.Error("Error creating topic", zap.String("topic", topicName), zap.Error(err)) - } else { - logger.Info("Successfully created topic", zap.String("topic", topicName)) - } - return err -} - -func (r *Reconciler) deleteTopic(ctx context.Context, channel *v1alpha1.KafkaChannel, kafkaClusterAdmin sarama.ClusterAdmin) error { - logger := logging.FromContext(ctx) - - topicName := resources.MakeTopicName(channel) - logger.Info("Deleting topic on Kafka Cluster", zap.String("topic", topicName)) - err := kafkaClusterAdmin.DeleteTopic(topicName) - if err == sarama.ErrUnknownTopicOrPartition { - return nil - } else if err != nil { - logger.Error("Error deleting topic", zap.String("topic", topicName), zap.Error(err)) - } else { - logger.Info("Successfully deleted topic", zap.String("topic", topicName)) - } - return err -} - -func (r *Reconciler) ensureFinalizer(channel *v1alpha1.KafkaChannel) error { - finalizers := sets.NewString(channel.Finalizers...) 
- if finalizers.Has(finalizerName) { - return nil - } - - mergePatch := map[string]interface{}{ - "metadata": map[string]interface{}{ - "finalizers": append(channel.Finalizers, finalizerName), - "resourceVersion": channel.ResourceVersion, - }, - } - - patch, err := json.Marshal(mergePatch) - if err != nil { - return err - } - - _, err = r.eventingClientSet.MessagingV1alpha1().KafkaChannels(channel.Namespace).Patch(channel.Name, types.MergePatchType, patch) - return err -} - -func removeFinalizer(channel *v1alpha1.KafkaChannel) { - finalizers := sets.NewString(channel.Finalizers...) - finalizers.Delete(finalizerName) - channel.Finalizers = finalizers.List() } diff --git a/contrib/kafka/pkg/utils/util.go b/contrib/kafka/pkg/utils/util.go index 408132b087a..63e379a54f6 100644 --- a/contrib/kafka/pkg/utils/util.go +++ b/contrib/kafka/pkg/utils/util.go @@ -21,7 +21,7 @@ import ( "log" "strings" - cluster "github.com/bsm/sarama-cluster" + "github.com/bsm/sarama-cluster" "github.com/knative/pkg/configmap" ) @@ -38,6 +38,8 @@ const ( // DefaultReplicationFactor defines the default number of replications DefaultReplicationFactor = 1 + + knativeKafkaTopicPrefix = "knative-messaging-kafka" ) type KafkaConfig struct { @@ -84,3 +86,8 @@ func GetKafkaConfig(path string) (*KafkaConfig, error) { } return config, nil } + +func TopicName(separator, namespace, name string) string { + topic := []string{knativeKafkaTopicPrefix, namespace, name} + return strings.Join(topic, separator) +} From 4d35909dba1f8e4af8510ef78c0d689c8630978a Mon Sep 17 00:00:00 2001 From: nachocano Date: Thu, 23 May 2019 13:00:31 -0700 Subject: [PATCH 45/64] updating tests --- contrib/kafka/pkg/dispatcher/dispatcher_test.go | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/contrib/kafka/pkg/dispatcher/dispatcher_test.go b/contrib/kafka/pkg/dispatcher/dispatcher_test.go index 1ffd3f0d226..59db41178f1 100644 --- a/contrib/kafka/pkg/dispatcher/dispatcher_test.go +++ 
b/contrib/kafka/pkg/dispatcher/dispatcher_test.go @@ -3,6 +3,7 @@ package dispatcher import ( "errors" "fmt" + "github.com/knative/eventing/pkg/provisioners/utils" "io/ioutil" "net/http" "net/http/httptest" @@ -394,8 +395,8 @@ func TestDispatcher_UpdateConfig(t *testing.T) { d := &KafkaDispatcher{ kafkaCluster: &mockSaramaCluster{closed: true}, kafkaConsumers: make(map[provisioners.ChannelReference]map[subscription]KafkaConsumer), - - logger: zap.NewNop(), + topicFunc: utils.TopicName, + logger: zap.NewNop(), } d.setConfig(&multichannelfanout.Config{}) d.setHostToChannelMap(map[string]provisioners.ChannelReference{}) @@ -501,7 +502,7 @@ func TestToKafkaMessage(t *testing.T) { }, Value: sarama.ByteEncoder(data), } - got := toKafkaMessage(channelRef, msg) + got := toKafkaMessage(channelRef, msg, utils.TopicName) if diff := cmp.Diff(want, got, cmpopts.IgnoreUnexported(sarama.ProducerMessage{})); diff != "" { t.Errorf("unexpected message (-want, +got) = %s", diff) } @@ -534,6 +535,7 @@ func TestSubscribe(t *testing.T) { kafkaConsumers: make(map[provisioners.ChannelReference]map[subscription]KafkaConsumer), dispatcher: provisioners.NewMessageDispatcher(zap.NewNop().Sugar()), logger: zap.NewNop(), + topicFunc: utils.TopicName, } testHandler := &dispatchTestHandler{ @@ -580,6 +582,7 @@ func TestPartitionConsumer(t *testing.T) { kafkaConsumers: make(map[provisioners.ChannelReference]map[subscription]KafkaConsumer), dispatcher: provisioners.NewMessageDispatcher(zap.NewNop().Sugar()), logger: zap.NewNop(), + topicFunc: utils.TopicName, } testHandler := &dispatchTestHandler{ t: t, @@ -627,6 +630,7 @@ func TestSubscribeError(t *testing.T) { d := &KafkaDispatcher{ kafkaCluster: sc, logger: zap.NewNop(), + topicFunc: utils.TopicName, } channelRef := provisioners.ChannelReference{ From 20fc9b941f8b11f4cfb83dadcf1fb80a61fad9ca Mon Sep 17 00:00:00 2001 From: nachocano Date: Thu, 23 May 2019 13:11:52 -0700 Subject: [PATCH 46/64] starting dispatcher in separate thread --- 
contrib/kafka/cmd/channel_dispatcher/main.go | 3 +++ contrib/kafka/config/400-kafka-config.yaml | 2 +- contrib/kafka/config/500-dispatcher.yaml | 15 +++++++++++---- contrib/kafka/config/kafka.yaml | 2 +- 4 files changed, 16 insertions(+), 6 deletions(-) diff --git a/contrib/kafka/cmd/channel_dispatcher/main.go b/contrib/kafka/cmd/channel_dispatcher/main.go index 7a62e4d8bad..0d1cedd5f45 100644 --- a/contrib/kafka/cmd/channel_dispatcher/main.go +++ b/contrib/kafka/cmd/channel_dispatcher/main.go @@ -122,6 +122,9 @@ func main() { logger.Fatalf("Failed to start informers: %v", err) } + logger.Info("Starting dispatcher.") + go kafkaDispatcher.Start(stopCh) + logger.Info("Starting controllers.") kncontroller.StartAll(stopCh, controllers[:]...) } diff --git a/contrib/kafka/config/400-kafka-config.yaml b/contrib/kafka/config/400-kafka-config.yaml index 2d43a9789b1..535ff48a812 100644 --- a/contrib/kafka/config/400-kafka-config.yaml +++ b/contrib/kafka/config/400-kafka-config.yaml @@ -20,7 +20,7 @@ metadata: data: # Broker URL. Replace this with the URLs for your kafka cluster, # which is in the format of my-cluster-kafka-bootstrap.my-kafka-namespace:9092. - bootstrap_servers: my-cluster-kafka-bootstrap.kafka:9092 + bootstrap_servers: REPLACE_WITH_CLUSTER_URL # Consumer mode to dispatch events from different partitions in parallel. # By default(multiplex), partitions are multiplexed with a single go channel. 
diff --git a/contrib/kafka/config/500-dispatcher.yaml b/contrib/kafka/config/500-dispatcher.yaml index a143bb59eba..fbd75314e83 100644 --- a/contrib/kafka/config/500-dispatcher.yaml +++ b/contrib/kafka/config/500-dispatcher.yaml @@ -32,14 +32,21 @@ spec: - name: dispatcher image: github.com/knative/eventing/contrib/kafka/cmd/channel_dispatcher env: - - name: SYSTEM_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace + - name: CONFIG_LOGGING_NAME + value: config-logging + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace volumeMounts: + - name: config-logging + mountPath: /etc/config-logging - name: config-kafka mountPath: /etc/config-kafka volumes: + - name: config-logging + configMap: + name: config-logging - name: config-kafka configMap: name: config-kafka diff --git a/contrib/kafka/config/kafka.yaml b/contrib/kafka/config/kafka.yaml index 46a57b4346c..fcba51067cf 100644 --- a/contrib/kafka/config/kafka.yaml +++ b/contrib/kafka/config/kafka.yaml @@ -99,7 +99,7 @@ metadata: data: # Broker URL's for the provisioner. Replace this with the URL's for your kafka cluster, # which is in the format of my-cluster-kafka-bootstrap.my-kafka-namespace:9092. - bootstrap_servers: my-cluster-kafka-bootstrap.kafka:9092 + bootstrap_servers: REPLACE_WITH_CLUSTER_URL # Consumer mode to dispatch events from different partitions in parallel. # By default(multiplex), partitions are multiplexed with a single go channel. 
From 22b3cd1c5def7cd68faf9e94b2c5cffc2d2f1d19 Mon Sep 17 00:00:00 2001 From: nachocano Date: Thu, 23 May 2019 13:27:06 -0700 Subject: [PATCH 47/64] updating subscribable validation --- contrib/kafka/config/400-kafka-config.yaml | 2 +- contrib/kafka/config/kafka.yaml | 2 +- .../subscribable_channelable_validation.go | 13 +++++++------ .../subscribable_channelable_validation_test.go | 15 ++++++++++++--- 4 files changed, 21 insertions(+), 11 deletions(-) diff --git a/contrib/kafka/config/400-kafka-config.yaml b/contrib/kafka/config/400-kafka-config.yaml index 535ff48a812..2d43a9789b1 100644 --- a/contrib/kafka/config/400-kafka-config.yaml +++ b/contrib/kafka/config/400-kafka-config.yaml @@ -20,7 +20,7 @@ metadata: data: # Broker URL. Replace this with the URLs for your kafka cluster, # which is in the format of my-cluster-kafka-bootstrap.my-kafka-namespace:9092. - bootstrap_servers: REPLACE_WITH_CLUSTER_URL + bootstrap_servers: my-cluster-kafka-bootstrap.kafka:9092 # Consumer mode to dispatch events from different partitions in parallel. # By default(multiplex), partitions are multiplexed with a single go channel. diff --git a/contrib/kafka/config/kafka.yaml b/contrib/kafka/config/kafka.yaml index fcba51067cf..46a57b4346c 100644 --- a/contrib/kafka/config/kafka.yaml +++ b/contrib/kafka/config/kafka.yaml @@ -99,7 +99,7 @@ metadata: data: # Broker URL's for the provisioner. Replace this with the URL's for your kafka cluster, # which is in the format of my-cluster-kafka-bootstrap.my-kafka-namespace:9092. - bootstrap_servers: REPLACE_WITH_CLUSTER_URL + bootstrap_servers: my-cluster-kafka-bootstrap.kafka:9092 # Consumer mode to dispatch events from different partitions in parallel. # By default(multiplex), partitions are multiplexed with a single go channel. 
diff --git a/pkg/apis/eventing/v1alpha1/subscribable_channelable_validation.go b/pkg/apis/eventing/v1alpha1/subscribable_channelable_validation.go index b415b68a183..117507920ea 100644 --- a/pkg/apis/eventing/v1alpha1/subscribable_channelable_validation.go +++ b/pkg/apis/eventing/v1alpha1/subscribable_channelable_validation.go @@ -18,6 +18,7 @@ package v1alpha1 import ( "reflect" + "strings" "github.com/google/go-cmp/cmp" "github.com/knative/pkg/apis" @@ -30,21 +31,21 @@ func isChannelEmpty(f corev1.ObjectReference) bool { } // Valid from only contains the following fields: -// - Kind == 'Channel' -// - APIVersion == 'eventing.knative.dev/v1alpha1' +// - Kind ends in 'Channel', e.g., 'Channel', 'InMemoryChannel', etc. +// - APIVersion == 'eventing.knative.dev/v1alpha1' || 'messaging.knative.dev/v1alpha1' // - Name == not empty func isValidChannel(f corev1.ObjectReference) *apis.FieldError { errs := isValidObjectReference(f) - if f.Kind != "Channel" { + if !strings.HasSuffix(f.Kind, "Channel") { fe := apis.ErrInvalidValue(f.Kind, "kind") fe.Paths = []string{"kind"} - fe.Details = "only 'Channel' kind is allowed" + fe.Details = "only '*Channel' kind is allowed" errs = errs.Also(fe) } - if f.APIVersion != "eventing.knative.dev/v1alpha1" { + if f.APIVersion != "eventing.knative.dev/v1alpha1" && f.APIVersion != "messaging.knative.dev/v1alpha1" { fe := apis.ErrInvalidValue(f.APIVersion, "apiVersion") - fe.Details = "only eventing.knative.dev/v1alpha1 is allowed for apiVersion" + fe.Details = "only eventing.knative.dev/v1alpha1 or messaging.knative.dev/v1alpha1 are allowed for apiVersion" errs = errs.Also(fe) } return errs diff --git a/pkg/apis/eventing/v1alpha1/subscribable_channelable_validation_test.go b/pkg/apis/eventing/v1alpha1/subscribable_channelable_validation_test.go index 1607bcd5035..954648ac177 100644 --- a/pkg/apis/eventing/v1alpha1/subscribable_channelable_validation_test.go +++ b/pkg/apis/eventing/v1alpha1/subscribable_channelable_validation_test.go @@ 
-39,7 +39,7 @@ var validationTests = []struct { want: &apis.FieldError{ Message: "invalid value: Strait", Paths: []string{"kind"}, - Details: "only 'Channel' kind is allowed", + Details: "only '*Channel' kind is allowed", }, }, { @@ -52,8 +52,8 @@ var validationTests = []struct { want: &apis.FieldError{ Message: `invalid value: eventing.knative.dev/v1alpha2`, Paths: []string{"apiVersion"}, - Details: "only eventing.knative.dev/v1alpha1 " + - "is allowed for apiVersion", + Details: "only eventing.knative.dev/v1alpha1 or messaging.knative.dev/v1alpha1 " + + "are allowed for apiVersion", }, }, { @@ -65,6 +65,15 @@ var validationTests = []struct { }, want: nil, }, + { + name: "valid channel messaging", + ref: corev1.ObjectReference{ + Name: "boaty-mcboatface", + APIVersion: "messaging.knative.dev/v1alpha1", + Kind: "InMemoryChannel", + }, + want: nil, + }, } func TestIsChannelEmpty(t *testing.T) { From 4eaf4039859a9f25edba31a7cf4609a5c7d0ea9d Mon Sep 17 00:00:00 2001 From: nachocano Date: Thu, 23 May 2019 13:41:51 -0700 Subject: [PATCH 48/64] updating subscription channel pattern --- config/300-subscription.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/config/300-subscription.yaml b/config/300-subscription.yaml index 19f5d7f7d98..a29fe164f81 100644 --- a/config/300-subscription.yaml +++ b/config/300-subscription.yaml @@ -62,7 +62,8 @@ spec: minLength: 1 kind: type: string - pattern: "^Channel$" + # Ending with Channel. E.g., Channel, InMemoryChannel, etc. + pattern: "Channel$" name: type: string minLength: 1 From 0f633978002683c9dda82ccaca96c70eece95b98 Mon Sep 17 00:00:00 2001 From: nachocano Date: Thu, 23 May 2019 13:47:22 -0700 Subject: [PATCH 49/64] Allow creating subscriptions to CRD channels. 
--- config/300-subscription.yaml | 3 ++- .../subscribable_channelable_validation.go | 13 +++++++------ .../subscribable_channelable_validation_test.go | 15 ++++++++++++--- pkg/apis/eventing/v1alpha1/subscription_types.go | 4 ++-- 4 files changed, 23 insertions(+), 12 deletions(-) diff --git a/config/300-subscription.yaml b/config/300-subscription.yaml index 19f5d7f7d98..a29fe164f81 100644 --- a/config/300-subscription.yaml +++ b/config/300-subscription.yaml @@ -62,7 +62,8 @@ spec: minLength: 1 kind: type: string - pattern: "^Channel$" + # Ending with Channel. E.g., Channel, InMemoryChannel, etc. + pattern: "Channel$" name: type: string minLength: 1 diff --git a/pkg/apis/eventing/v1alpha1/subscribable_channelable_validation.go b/pkg/apis/eventing/v1alpha1/subscribable_channelable_validation.go index b415b68a183..8585e22009c 100644 --- a/pkg/apis/eventing/v1alpha1/subscribable_channelable_validation.go +++ b/pkg/apis/eventing/v1alpha1/subscribable_channelable_validation.go @@ -18,6 +18,7 @@ package v1alpha1 import ( "reflect" + "strings" "github.com/google/go-cmp/cmp" "github.com/knative/pkg/apis" @@ -30,21 +31,21 @@ func isChannelEmpty(f corev1.ObjectReference) bool { } // Valid from only contains the following fields: -// - Kind == 'Channel' -// - APIVersion == 'eventing.knative.dev/v1alpha1' +// - Kind ends in 'Channel', e.g., 'Channel', 'InMemoryChannel', etc. 
+// - APIVersion == 'eventing.knative.dev/v1alpha1' || 'messaging.knative.dev/v1alpha1' // - Name == not empty func isValidChannel(f corev1.ObjectReference) *apis.FieldError { errs := isValidObjectReference(f) - if f.Kind != "Channel" { + if !strings.HasSuffix(f.Kind, "Channel") { fe := apis.ErrInvalidValue(f.Kind, "kind") fe.Paths = []string{"kind"} - fe.Details = "only 'Channel' kind is allowed" + fe.Details = "only 'Channel$' kind is allowed" errs = errs.Also(fe) } - if f.APIVersion != "eventing.knative.dev/v1alpha1" { + if f.APIVersion != "eventing.knative.dev/v1alpha1" && f.APIVersion != "messaging.knative.dev/v1alpha1" { fe := apis.ErrInvalidValue(f.APIVersion, "apiVersion") - fe.Details = "only eventing.knative.dev/v1alpha1 is allowed for apiVersion" + fe.Details = "only eventing.knative.dev/v1alpha1 or messaging.knative.dev/v1alpha1 are allowed for apiVersion" errs = errs.Also(fe) } return errs diff --git a/pkg/apis/eventing/v1alpha1/subscribable_channelable_validation_test.go b/pkg/apis/eventing/v1alpha1/subscribable_channelable_validation_test.go index 1607bcd5035..e05f682133b 100644 --- a/pkg/apis/eventing/v1alpha1/subscribable_channelable_validation_test.go +++ b/pkg/apis/eventing/v1alpha1/subscribable_channelable_validation_test.go @@ -39,7 +39,7 @@ var validationTests = []struct { want: &apis.FieldError{ Message: "invalid value: Strait", Paths: []string{"kind"}, - Details: "only 'Channel' kind is allowed", + Details: "only 'Channel$' kind is allowed", }, }, { @@ -52,8 +52,8 @@ var validationTests = []struct { want: &apis.FieldError{ Message: `invalid value: eventing.knative.dev/v1alpha2`, Paths: []string{"apiVersion"}, - Details: "only eventing.knative.dev/v1alpha1 " + - "is allowed for apiVersion", + Details: "only eventing.knative.dev/v1alpha1 or messaging.knative.dev/v1alpha1 " + + "are allowed for apiVersion", }, }, { @@ -65,6 +65,15 @@ var validationTests = []struct { }, want: nil, }, + { + name: "valid channel messaging", + ref: 
corev1.ObjectReference{ + Name: "boaty-mcboatface", + APIVersion: "messaging.knative.dev/v1alpha1", + Kind: "InMemoryChannel", + }, + want: nil, + }, } func TestIsChannelEmpty(t *testing.T) { diff --git a/pkg/apis/eventing/v1alpha1/subscription_types.go b/pkg/apis/eventing/v1alpha1/subscription_types.go index fd2166bfa23..472d989e01b 100644 --- a/pkg/apis/eventing/v1alpha1/subscription_types.go +++ b/pkg/apis/eventing/v1alpha1/subscription_types.go @@ -76,8 +76,8 @@ type SubscriptionSpec struct { // - Kind // - APIVersion // - Name - // Kind must be "Channel" and APIVersion must be - // "eventing.knative.dev/v1alpha1" + // Kind must end in "Channel". E.g., "Channel", "InMemoryChannel", etc. + // APIVersion must be "eventing.knative.dev/v1alpha1" or ""messaging.knative.dev/v1alpha1". // // This field is immutable. We have no good answer on what happens to // the events that are currently in the channel being consumed from From 62480f32a4f0c47b3938e0a23307334910ae27ca Mon Sep 17 00:00:00 2001 From: nachocano Date: Thu, 23 May 2019 14:07:51 -0700 Subject: [PATCH 50/64] updating UTs --- .../eventing/v1alpha1/subscription_validation_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/apis/eventing/v1alpha1/subscription_validation_test.go b/pkg/apis/eventing/v1alpha1/subscription_validation_test.go index d1dcc7ae35c..a4e3ae3b606 100644 --- a/pkg/apis/eventing/v1alpha1/subscription_validation_test.go +++ b/pkg/apis/eventing/v1alpha1/subscription_validation_test.go @@ -421,7 +421,7 @@ func TestValidChannel(t *testing.T) { }, want: func() *apis.FieldError { fe := apis.ErrInvalidValue("", "apiVersion") - fe.Details = "only eventing.knative.dev/v1alpha1 is allowed for apiVersion" + fe.Details = "only eventing.knative.dev/v1alpha1 or messaging.knative.dev/v1alpha1 are allowed for apiVersion" return apis.ErrMissingField("apiVersion").Also(fe) }(), }, { @@ -432,7 +432,7 @@ func TestValidChannel(t *testing.T) { }, want: func() *apis.FieldError { 
fe := apis.ErrInvalidValue("", "kind") - fe.Details = "only 'Channel' kind is allowed" + fe.Details = "only 'Channel$' kind is allowed" return apis.ErrMissingField("kind").Also(fe) }(), }, { @@ -444,7 +444,7 @@ func TestValidChannel(t *testing.T) { }, want: func() *apis.FieldError { fe := apis.ErrInvalidValue("subscription", "kind") - fe.Details = "only 'Channel' kind is allowed" + fe.Details = "only 'Channel$' kind is allowed" return fe }(), }, { @@ -456,7 +456,7 @@ func TestValidChannel(t *testing.T) { }, want: func() *apis.FieldError { fe := apis.ErrInvalidValue("wrongapiversion", "apiVersion") - fe.Details = "only eventing.knative.dev/v1alpha1 is allowed for apiVersion" + fe.Details = "only eventing.knative.dev/v1alpha1 or messaging.knative.dev/v1alpha1 are allowed for apiVersion" return fe }(), }, { From 3085dfb23457fc710bef51d7f8a218697a218835 Mon Sep 17 00:00:00 2001 From: nachocano Date: Thu, 23 May 2019 14:10:12 -0700 Subject: [PATCH 51/64] cosmetic --- pkg/apis/eventing/v1alpha1/subscription_types.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/apis/eventing/v1alpha1/subscription_types.go b/pkg/apis/eventing/v1alpha1/subscription_types.go index 472d989e01b..445a24eefa5 100644 --- a/pkg/apis/eventing/v1alpha1/subscription_types.go +++ b/pkg/apis/eventing/v1alpha1/subscription_types.go @@ -77,7 +77,7 @@ type SubscriptionSpec struct { // - APIVersion // - Name // Kind must end in "Channel". E.g., "Channel", "InMemoryChannel", etc. - // APIVersion must be "eventing.knative.dev/v1alpha1" or ""messaging.knative.dev/v1alpha1". + // APIVersion must be "eventing.knative.dev/v1alpha1" or "messaging.knative.dev/v1alpha1". // // This field is immutable. 
We have no good answer on what happens to // the events that are currently in the channel being consumed from From 1865d7e86d2680355843ca38d0dfa71b6172b578 Mon Sep 17 00:00:00 2001 From: nachocano Date: Thu, 23 May 2019 14:29:14 -0700 Subject: [PATCH 52/64] back to replace --- contrib/kafka/config/400-kafka-config.yaml | 2 +- contrib/kafka/config/kafka.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/contrib/kafka/config/400-kafka-config.yaml b/contrib/kafka/config/400-kafka-config.yaml index 2d43a9789b1..535ff48a812 100644 --- a/contrib/kafka/config/400-kafka-config.yaml +++ b/contrib/kafka/config/400-kafka-config.yaml @@ -20,7 +20,7 @@ metadata: data: # Broker URL. Replace this with the URLs for your kafka cluster, # which is in the format of my-cluster-kafka-bootstrap.my-kafka-namespace:9092. - bootstrap_servers: my-cluster-kafka-bootstrap.kafka:9092 + bootstrap_servers: REPLACE_WITH_CLUSTER_URL # Consumer mode to dispatch events from different partitions in parallel. # By default(multiplex), partitions are multiplexed with a single go channel. diff --git a/contrib/kafka/config/kafka.yaml b/contrib/kafka/config/kafka.yaml index 46a57b4346c..fcba51067cf 100644 --- a/contrib/kafka/config/kafka.yaml +++ b/contrib/kafka/config/kafka.yaml @@ -99,7 +99,7 @@ metadata: data: # Broker URL's for the provisioner. Replace this with the URL's for your kafka cluster, # which is in the format of my-cluster-kafka-bootstrap.my-kafka-namespace:9092. - bootstrap_servers: my-cluster-kafka-bootstrap.kafka:9092 + bootstrap_servers: REPLACE_WITH_CLUSTER_URL # Consumer mode to dispatch events from different partitions in parallel. # By default(multiplex), partitions are multiplexed with a single go channel. 
From 97a1dd40d4d2baea20d4cceaebcc248ba31c1236 Mon Sep 17 00:00:00 2001 From: Nacho Cano Date: Thu, 23 May 2019 18:43:12 -0700 Subject: [PATCH 53/64] rollback change --- .../eventing/v1alpha1/subscription_validation_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/apis/eventing/v1alpha1/subscription_validation_test.go b/pkg/apis/eventing/v1alpha1/subscription_validation_test.go index a4e3ae3b606..d1dcc7ae35c 100644 --- a/pkg/apis/eventing/v1alpha1/subscription_validation_test.go +++ b/pkg/apis/eventing/v1alpha1/subscription_validation_test.go @@ -421,7 +421,7 @@ func TestValidChannel(t *testing.T) { }, want: func() *apis.FieldError { fe := apis.ErrInvalidValue("", "apiVersion") - fe.Details = "only eventing.knative.dev/v1alpha1 or messaging.knative.dev/v1alpha1 are allowed for apiVersion" + fe.Details = "only eventing.knative.dev/v1alpha1 is allowed for apiVersion" return apis.ErrMissingField("apiVersion").Also(fe) }(), }, { @@ -432,7 +432,7 @@ func TestValidChannel(t *testing.T) { }, want: func() *apis.FieldError { fe := apis.ErrInvalidValue("", "kind") - fe.Details = "only 'Channel$' kind is allowed" + fe.Details = "only 'Channel' kind is allowed" return apis.ErrMissingField("kind").Also(fe) }(), }, { @@ -444,7 +444,7 @@ func TestValidChannel(t *testing.T) { }, want: func() *apis.FieldError { fe := apis.ErrInvalidValue("subscription", "kind") - fe.Details = "only 'Channel$' kind is allowed" + fe.Details = "only 'Channel' kind is allowed" return fe }(), }, { @@ -456,7 +456,7 @@ func TestValidChannel(t *testing.T) { }, want: func() *apis.FieldError { fe := apis.ErrInvalidValue("wrongapiversion", "apiVersion") - fe.Details = "only eventing.knative.dev/v1alpha1 or messaging.knative.dev/v1alpha1 are allowed for apiVersion" + fe.Details = "only eventing.knative.dev/v1alpha1 is allowed for apiVersion" return fe }(), }, { From 0fdb8d4609eeda0426954a92ff52cd4867d252d7 Mon Sep 17 00:00:00 2001 From: Nacho Cano Date: Sun, 26 May 2019 
10:09:52 -0700 Subject: [PATCH 54/64] updates after code review comments --- contrib/kafka/config/200-controller-clusterrole.yaml | 6 ++++++ contrib/kafka/config/{ => ccp}/README.md | 6 +++--- contrib/kafka/config/{ => ccp}/kafka.yaml | 0 3 files changed, 9 insertions(+), 3 deletions(-) rename contrib/kafka/config/{ => ccp}/README.md (93%) rename contrib/kafka/config/{ => ccp}/kafka.yaml (100%) diff --git a/contrib/kafka/config/200-controller-clusterrole.yaml b/contrib/kafka/config/200-controller-clusterrole.yaml index 2cb407bde1c..90c1cee2620 100644 --- a/contrib/kafka/config/200-controller-clusterrole.yaml +++ b/contrib/kafka/config/200-controller-clusterrole.yaml @@ -28,6 +28,12 @@ rules: - watch - update - patch + - apiGroups: + - messaging.knative.dev + resources: + - kafkachannels/finalizers + verbs: + - update - apiGroups: - "" # Core API group. resources: diff --git a/contrib/kafka/config/README.md b/contrib/kafka/config/ccp/README.md similarity index 93% rename from contrib/kafka/config/README.md rename to contrib/kafka/config/ccp/README.md index 2664fb684ee..37663270f9e 100644 --- a/contrib/kafka/config/README.md +++ b/contrib/kafka/config/ccp/README.md @@ -2,7 +2,7 @@ Deployment steps: -1. Setup [Knative Eventing](../../../DEVELOPMENT.md) +1. Setup [Knative Eventing](../../../../DEVELOPMENT.md) 1. If not done already, install an Apache Kafka cluster! - For Kubernetes a simple installation is done using the @@ -16,7 +16,7 @@ Deployment steps: 1. Now that Apache Kafka is installed, you need to configure the `bootstrap_servers` value in the `kafka-channel-controller-config` ConfigMap, - located inside the `contrib/kafka/config/kafka.yaml` file: + located inside the `contrib/kafka/config/ccp/kafka.yaml` file: ```yaml ... @@ -39,7 +39,7 @@ Deployment steps: 1. Apply the 'Kafka' ClusterChannelProvisioner, Controller, and Dispatcher: ``` - ko apply -f contrib/kafka/config/kafka.yaml + ko apply -f contrib/kafka/config/ccp/kafka.yaml ``` 1. 
Create Channels that reference the 'kafka' ClusterChannelProvisioner. diff --git a/contrib/kafka/config/kafka.yaml b/contrib/kafka/config/ccp/kafka.yaml similarity index 100% rename from contrib/kafka/config/kafka.yaml rename to contrib/kafka/config/ccp/kafka.yaml From 575099d7eefcb0516ec9ba9f5aec5fac9937461b Mon Sep 17 00:00:00 2001 From: Nacho Cano Date: Sun, 26 May 2019 10:12:21 -0700 Subject: [PATCH 55/64] unneeded TODO --- contrib/kafka/pkg/dispatcher/dispatcher.go | 1 - 1 file changed, 1 deletion(-) diff --git a/contrib/kafka/pkg/dispatcher/dispatcher.go b/contrib/kafka/pkg/dispatcher/dispatcher.go index d896592e0f2..68a6a215fcc 100644 --- a/contrib/kafka/pkg/dispatcher/dispatcher.go +++ b/contrib/kafka/pkg/dispatcher/dispatcher.go @@ -210,7 +210,6 @@ func (d *KafkaDispatcher) subscribe(channelRef provisioners.ChannelReference, su d.logger.Info("Subscribing", zap.Any("channelRef", channelRef), zap.Any("subscription", sub)) topicName := d.topicFunc(utils.KafkaChannelSeparator, channelRef.Namespace, channelRef.Name) - // TODO check whether group can be the same for provisioner and CRD impl? group := fmt.Sprintf("%s.%s", controller.Name, sub.UID) consumer, err := d.kafkaCluster.NewConsumer(group, []string{topicName}) From ed544f041b875166642925b9f20e7b91545dad1c Mon Sep 17 00:00:00 2001 From: Nacho Cano Date: Sun, 26 May 2019 10:30:43 -0700 Subject: [PATCH 56/64] changing e2e kafka dir folder --- test/e2e-tests.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e-tests.sh b/test/e2e-tests.sh index 85dc69d79f0..99145403349 100755 --- a/test/e2e-tests.sh +++ b/test/e2e-tests.sh @@ -55,7 +55,7 @@ readonly STRIMZI_INSTALLATION_CONFIG="$(mktemp)" # Kafka cluster CR config file. readonly KAFKA_INSTALLATION_CONFIG="test/config/100-kafka-persistent-single-2.1.0.yaml" # Kafka provisioner config template. 
-readonly KAFKA_CONFIG_TEMPLATE="contrib/kafka/config/kafka.yaml" +readonly KAFKA_CONFIG_TEMPLATE="contrib/kafka/config/ccp/kafka.yaml" # Real Kafka provisioner config, generated from the template. readonly KAFKA_CONFIG="$(mktemp)" # Kafka cluster URL for our installation From 8a54ef09e842e2653f8effa2c43d5a67206f3870 Mon Sep 17 00:00:00 2001 From: nachocano Date: Tue, 28 May 2019 10:35:26 -0700 Subject: [PATCH 57/64] adding new scheme --- contrib/kafka/cmd/channel_controller/main.go | 5 +++++ contrib/kafka/cmd/channel_dispatcher/main.go | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/contrib/kafka/cmd/channel_controller/main.go b/contrib/kafka/cmd/channel_controller/main.go index ac4e168afc6..95212a0b8c8 100644 --- a/contrib/kafka/cmd/channel_controller/main.go +++ b/contrib/kafka/cmd/channel_controller/main.go @@ -22,6 +22,7 @@ import ( "log" clientset "github.com/knative/eventing/contrib/kafka/pkg/client/clientset/versioned" + eventingScheme "github.com/knative/eventing/contrib/kafka/pkg/client/clientset/versioned/scheme" informers "github.com/knative/eventing/contrib/kafka/pkg/client/informers/externalversions" kafkachannel "github.com/knative/eventing/contrib/kafka/pkg/reconciler/controller" "github.com/knative/eventing/pkg/logconfig" @@ -33,6 +34,7 @@ import ( "github.com/knative/pkg/system" "go.uber.org/zap" kubeinformers "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" ) @@ -90,6 +92,9 @@ func main() { endpointsInformer := kubeInformerFactory.Core().V1().Endpoints() deploymentInformer := kubeInformerFactory.Apps().V1().Deployments() + // Adding the scheme. + eventingScheme.AddToScheme(scheme.Scheme) + // Build all of our controllers, with the clients constructed above. // Add new controllers to this array. // You also need to modify numControllers above to match this. 
diff --git a/contrib/kafka/cmd/channel_dispatcher/main.go b/contrib/kafka/cmd/channel_dispatcher/main.go index 0d1cedd5f45..1c9c6f0b9f9 100644 --- a/contrib/kafka/cmd/channel_dispatcher/main.go +++ b/contrib/kafka/cmd/channel_dispatcher/main.go @@ -22,6 +22,7 @@ import ( "log" clientset "github.com/knative/eventing/contrib/kafka/pkg/client/clientset/versioned" + eventingScheme "github.com/knative/eventing/contrib/kafka/pkg/client/clientset/versioned/scheme" informers "github.com/knative/eventing/contrib/kafka/pkg/client/informers/externalversions" "github.com/knative/eventing/contrib/kafka/pkg/dispatcher" kafkachannel "github.com/knative/eventing/contrib/kafka/pkg/reconciler/dispatcher" @@ -32,6 +33,7 @@ import ( kncontroller "github.com/knative/pkg/controller" "github.com/knative/pkg/signals" "go.uber.org/zap" + "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" ) @@ -86,6 +88,9 @@ func main() { // Messaging kafkaChannelInformer := eventingInformerFactory.Messaging().V1alpha1().KafkaChannels() + // Adding the scheme. + eventingScheme.AddToScheme(scheme.Scheme) + // Build all of our controllers, with the clients constructed above. // Add new controllers to this array. // You also need to modify numControllers above to match this. 
From cfc1e65e67e51758ffc34f0c826f2d63053ecd48 Mon Sep 17 00:00:00 2001 From: nachocano Date: Tue, 28 May 2019 12:45:49 -0700 Subject: [PATCH 58/64] renamed ccp to provisioner --- contrib/kafka/config/{ccp => provisioner}/README.md | 4 ++-- contrib/kafka/config/{ccp => provisioner}/kafka.yaml | 0 test/e2e-tests.sh | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) rename contrib/kafka/config/{ccp => provisioner}/README.md (95%) rename contrib/kafka/config/{ccp => provisioner}/kafka.yaml (100%) diff --git a/contrib/kafka/config/ccp/README.md b/contrib/kafka/config/provisioner/README.md similarity index 95% rename from contrib/kafka/config/ccp/README.md rename to contrib/kafka/config/provisioner/README.md index 37663270f9e..f67ea4c1e3b 100644 --- a/contrib/kafka/config/ccp/README.md +++ b/contrib/kafka/config/provisioner/README.md @@ -16,7 +16,7 @@ Deployment steps: 1. Now that Apache Kafka is installed, you need to configure the `bootstrap_servers` value in the `kafka-channel-controller-config` ConfigMap, - located inside the `contrib/kafka/config/ccp/kafka.yaml` file: + located inside the `contrib/kafka/config/provisioner/kafka.yaml` file: ```yaml ... @@ -39,7 +39,7 @@ Deployment steps: 1. Apply the 'Kafka' ClusterChannelProvisioner, Controller, and Dispatcher: ``` - ko apply -f contrib/kafka/config/ccp/kafka.yaml + ko apply -f contrib/kafka/config/provisioner/kafka.yaml ``` 1. Create Channels that reference the 'kafka' ClusterChannelProvisioner. diff --git a/contrib/kafka/config/ccp/kafka.yaml b/contrib/kafka/config/provisioner/kafka.yaml similarity index 100% rename from contrib/kafka/config/ccp/kafka.yaml rename to contrib/kafka/config/provisioner/kafka.yaml diff --git a/test/e2e-tests.sh b/test/e2e-tests.sh index 99145403349..c96b695d477 100755 --- a/test/e2e-tests.sh +++ b/test/e2e-tests.sh @@ -55,7 +55,7 @@ readonly STRIMZI_INSTALLATION_CONFIG="$(mktemp)" # Kafka cluster CR config file. 
readonly KAFKA_INSTALLATION_CONFIG="test/config/100-kafka-persistent-single-2.1.0.yaml" # Kafka provisioner config template. -readonly KAFKA_CONFIG_TEMPLATE="contrib/kafka/config/ccp/kafka.yaml" +readonly KAFKA_CONFIG_TEMPLATE="contrib/kafka/config/provisioner/kafka.yaml" # Real Kafka provisioner config, generated from the template. readonly KAFKA_CONFIG="$(mktemp)" # Kafka cluster URL for our installation From 9a9141557a7bba5b509d264cf18b8c5be94aa486 Mon Sep 17 00:00:00 2001 From: nachocano Date: Tue, 28 May 2019 15:41:42 -0700 Subject: [PATCH 59/64] passing interface instead --- contrib/kafka/pkg/reconciler/controller/kafkachannel.go | 6 +++--- contrib/kafka/pkg/reconciler/dispatcher/kafkachannel.go | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/contrib/kafka/pkg/reconciler/controller/kafkachannel.go b/contrib/kafka/pkg/reconciler/controller/kafkachannel.go index 2ef45bdd57e..642f16b5ed1 100644 --- a/contrib/kafka/pkg/reconciler/controller/kafkachannel.go +++ b/contrib/kafka/pkg/reconciler/controller/kafkachannel.go @@ -27,7 +27,7 @@ import ( "github.com/Shopify/sarama" "github.com/knative/eventing/contrib/kafka/pkg/apis/messaging/v1alpha1" - "github.com/knative/eventing/contrib/kafka/pkg/client/clientset/versioned" + clientset "github.com/knative/eventing/contrib/kafka/pkg/client/clientset/versioned" messaginginformers "github.com/knative/eventing/contrib/kafka/pkg/client/informers/externalversions/messaging/v1alpha1" listers "github.com/knative/eventing/contrib/kafka/pkg/client/listers/messaging/v1alpha1" "github.com/knative/eventing/contrib/kafka/pkg/reconciler/controller/resources" @@ -78,7 +78,7 @@ type Reconciler struct { // Shopify/sarama, see https://github.com/Shopify/sarama/issues/1162. 
kafkaClusterAdmin sarama.ClusterAdmin - eventingClientSet *versioned.Clientset + eventingClientSet clientset.Interface kafkachannelLister listers.KafkaChannelLister kafkachannelInformer cache.SharedIndexInformer deploymentLister appsv1listers.DeploymentLister @@ -102,7 +102,7 @@ var _ cache.ResourceEventHandler = (*Reconciler)(nil) // Registers event handlers to enqueue events. func NewController( opt reconciler.Options, - eventingClientSet *versioned.Clientset, + eventingClientSet clientset.Interface, kafkaConfig *utils.KafkaConfig, dispatcherNamespace string, dispatcherDeploymentName string, diff --git a/contrib/kafka/pkg/reconciler/dispatcher/kafkachannel.go b/contrib/kafka/pkg/reconciler/dispatcher/kafkachannel.go index 3f746aa6c8d..1e4287924f4 100644 --- a/contrib/kafka/pkg/reconciler/dispatcher/kafkachannel.go +++ b/contrib/kafka/pkg/reconciler/dispatcher/kafkachannel.go @@ -19,7 +19,7 @@ package controller import ( "context" "github.com/knative/eventing/contrib/kafka/pkg/apis/messaging/v1alpha1" - "github.com/knative/eventing/contrib/kafka/pkg/client/clientset/versioned" + clientset "github.com/knative/eventing/contrib/kafka/pkg/client/clientset/versioned" messaginginformers "github.com/knative/eventing/contrib/kafka/pkg/client/informers/externalversions/messaging/v1alpha1" listers "github.com/knative/eventing/contrib/kafka/pkg/client/listers/messaging/v1alpha1" "github.com/knative/eventing/contrib/kafka/pkg/dispatcher" @@ -47,7 +47,7 @@ type Reconciler struct { kafkaDispatcher *dispatcher.KafkaDispatcher - eventingClientSet *versioned.Clientset + eventingClientSet clientset.Interface kafkachannelLister listers.KafkaChannelLister kafkachannelInformer cache.SharedIndexInformer impl *controller.Impl @@ -60,7 +60,7 @@ var _ controller.Reconciler = (*Reconciler)(nil) // Registers event handlers to enqueue events. 
func NewController( opt reconciler.Options, - eventingClientSet *versioned.Clientset, + eventingClientSet clientset.Interface, kafkaDispatcher *dispatcher.KafkaDispatcher, kafkachannelInformer messaginginformers.KafkaChannelInformer, ) *controller.Impl { From d41d5d3316e1ca62ab47bb452ce2cade48fd9dea Mon Sep 17 00:00:00 2001 From: nachocano Date: Tue, 28 May 2019 16:10:53 -0700 Subject: [PATCH 60/64] new function available --- contrib/kafka/pkg/reconciler/controller/kafkachannel.go | 2 +- contrib/kafka/pkg/reconciler/dispatcher/kafkachannel.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/contrib/kafka/pkg/reconciler/controller/kafkachannel.go b/contrib/kafka/pkg/reconciler/controller/kafkachannel.go index 642f16b5ed1..1b1d83c659d 100644 --- a/contrib/kafka/pkg/reconciler/controller/kafkachannel.go +++ b/contrib/kafka/pkg/reconciler/controller/kafkachannel.go @@ -126,7 +126,7 @@ func NewController( serviceLister: serviceInformer.Lister(), endpointsLister: endpointsInformer.Lister(), } - r.impl = controller.NewImpl(r, r.Logger, ReconcilerName, reconciler.MustNewStatsReporter(ReconcilerName, r.Logger)) + r.impl = controller.NewImpl(r, r.Logger, ReconcilerName) r.Logger.Info("Setting up event handlers") kafkachannelInformer.Informer().AddEventHandler(reconciler.Handler(r.impl.Enqueue)) diff --git a/contrib/kafka/pkg/reconciler/dispatcher/kafkachannel.go b/contrib/kafka/pkg/reconciler/dispatcher/kafkachannel.go index 1e4287924f4..877f4356a9c 100644 --- a/contrib/kafka/pkg/reconciler/dispatcher/kafkachannel.go +++ b/contrib/kafka/pkg/reconciler/dispatcher/kafkachannel.go @@ -72,7 +72,7 @@ func NewController( kafkachannelLister: kafkachannelInformer.Lister(), kafkachannelInformer: kafkachannelInformer.Informer(), } - r.impl = controller.NewImpl(r, r.Logger, ReconcilerName, reconciler.MustNewStatsReporter(ReconcilerName, r.Logger)) + r.impl = controller.NewImpl(r, r.Logger, ReconcilerName) r.Logger.Info("Setting up event handlers") From 
2876113df5bd13946b63549e631d6c1e5f7456f8 Mon Sep 17 00:00:00 2001 From: Nacho Cano Date: Tue, 28 May 2019 18:23:23 -0700 Subject: [PATCH 61/64] removing commented UT to add it in a follow up PR --- .../controller/kafkachannel_test.go | 433 ------------------ 1 file changed, 433 deletions(-) delete mode 100644 contrib/kafka/pkg/reconciler/controller/kafkachannel_test.go diff --git a/contrib/kafka/pkg/reconciler/controller/kafkachannel_test.go b/contrib/kafka/pkg/reconciler/controller/kafkachannel_test.go deleted file mode 100644 index b40f9a01d6c..00000000000 --- a/contrib/kafka/pkg/reconciler/controller/kafkachannel_test.go +++ /dev/null @@ -1,433 +0,0 @@ -/* -Copyright 2019 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controller - -import ( - "fmt" - . "github.com/knative/eventing/contrib/kafka/pkg/utils" - "testing" - - "github.com/knative/eventing/contrib/kafka/pkg/apis/messaging/v1alpha1" - fakeclientset "github.com/knative/eventing/contrib/kafka/pkg/client/clientset/versioned/fake" - informers "github.com/knative/eventing/contrib/kafka/pkg/client/informers/externalversions" - "github.com/knative/eventing/pkg/reconciler" - reconciletesting "github.com/knative/eventing/pkg/reconciler/testing" - "github.com/knative/eventing/pkg/utils" - duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1" - "github.com/knative/pkg/controller" - "github.com/knative/pkg/kmeta" - logtesting "github.com/knative/pkg/logging/testing" - . 
"github.com/knative/pkg/reconciler/testing" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - kubeinformers "k8s.io/client-go/informers" - fakekubeclientset "k8s.io/client-go/kubernetes/fake" - "k8s.io/client-go/kubernetes/scheme" -) - -const ( - systemNS = "knative-eventing" - testNS = "test-namespace" - kcName = "test-kc" - dispatcherDeploymentName = "test-deployment" - dispatcherServiceName = "test-service" - channelServiceAddress = "test-kc-kn-channel.test-namespace.svc.cluster.local" - - subscriberAPIVersion = "v1" - subscriberKind = "Service" - subscriberName = "subscriberName" - subscriberURI = "http://example.com/subscriber" -) - -var ( - trueVal = true - // deletionTime is used when objects are marked as deleted. Rfc3339Copy() - // truncates to seconds to match the loss of precision during serialization. - deletionTime = metav1.Now().Rfc3339Copy() -) - -func init() { - // Add types to scheme - _ = v1alpha1.AddToScheme(scheme.Scheme) - _ = duckv1alpha1.AddToScheme(scheme.Scheme) -} - -func TestNewController(t *testing.T) { - kubeClient := fakekubeclientset.NewSimpleClientset() - eventingClient := fakeclientset.NewSimpleClientset() - - // Create informer factories with fake clients. The second parameter sets the - // resync period to zero, disabling it. 
- kubeInformerFactory := kubeinformers.NewSharedInformerFactory(kubeClient, 0) - eventingInformerFactory := informers.NewSharedInformerFactory(eventingClient, 0) - - kafkaChannelInformer := eventingInformerFactory.Messaging().V1alpha1().KafkaChannels() - - // Kube - serviceInformer := kubeInformerFactory.Core().V1().Services() - endpointsInformer := kubeInformerFactory.Core().V1().Endpoints() - deploymentInformer := kubeInformerFactory.Apps().V1().Deployments() - - kafkaConfig := &KafkaConfig{ - Brokers: []string{"boostrap_server"}, - } - - c := NewController( - reconciler.Options{ - KubeClientSet: kubeClient, - Logger: logtesting.TestLogger(t), - }, - nil, // TODO fix this - kafkaConfig, - systemNS, - dispatcherDeploymentName, - dispatcherServiceName, - kafkaChannelInformer, - deploymentInformer, - serviceInformer, - endpointsInformer) - - if c == nil { - t.Fatalf("Failed to create with NewController") - } -} - -func TestAllCases(t *testing.T) { - // imcKey := testNS + "/" + kcName - table := TableTest{ - { - Name: "bad workqueue key", - // Make sure Reconcile handles bad keys. - Key: "too/many/parts", - }, - //}, { - // Name: "key not found", - // // Make sure Reconcile handles good keys that don't exist. 
- // Key: "foo/not-found", - //}, - // }, { // TODO: there is a bug in the controller, it will query for "" - // Name: "trigger key not found ", - // Objects: []runtime.Object{ - // reconciletesting.NewTrigger(triggerName, testNS), - // }, - // Key: "foo/incomplete", - // WantErr: true, - // WantEvents: []string{ - // Eventf(corev1.EventTypeWarning, "ChannelReferenceFetchFailed", "Failed to validate spec.channel exists: s \"\" not found"), - // }, - //}, { - // Name: "deleting", - // Key: imcKey, - // Objects: []runtime.Object{ - // reconciletesting.NewInMemoryChannel(imcName, testNS, - // reconciletesting.WithInitInMemoryChannelConditions, - // reconciletesting.WithInMemoryChannelDeleted)}, - // WantErr: false, - // WantEvents: []string{ - // Eventf(corev1.EventTypeNormal, "Reconciled", "InMemoryChannel reconciled"), - // }, - //}, { - // Name: "deployment does not exist", - // Key: imcKey, - // Objects: []runtime.Object{ - // reconciletesting.NewInMemoryChannel(imcName, testNS), - // }, - // WantErr: true, - // WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - // Object: reconciletesting.NewInMemoryChannel(imcName, testNS, - // reconciletesting.WithInitInMemoryChannelConditions, - // reconciletesting.WithInMemoryChannelDeploymentNotReady("DispatcherDeploymentDoesNotExist", "Dispatcher Deployment does not exist")), - // }}, - // WantEvents: []string{ - // Eventf(corev1.EventTypeWarning, "ReconcileFailed", "InMemoryChannel reconciliation failed: deployment.apps \"test-deployment\" not found"), - // }, - //}, { - // Name: "Service does not exist", - // Key: imcKey, - // Objects: []runtime.Object{ - // makeReadyDeployment(), - // reconciletesting.NewInMemoryChannel(imcName, testNS), - // }, - // WantErr: true, - // WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - // Object: reconciletesting.NewInMemoryChannel(imcName, testNS, - // reconciletesting.WithInitInMemoryChannelConditions, - // reconciletesting.WithInMemoryChannelDeploymentReady(), - // 
reconciletesting.WithInMemoryChannelServicetNotReady("DispatcherServiceDoesNotExist", "Dispatcher Service does not exist")), - // }}, - // WantEvents: []string{ - // Eventf(corev1.EventTypeWarning, "ReconcileFailed", "InMemoryChannel reconciliation failed: service \"test-service\" not found"), - // }, - //}, { - // Name: "Endpoints does not exist", - // Key: imcKey, - // Objects: []runtime.Object{ - // makeReadyDeployment(), - // makeService(), - // reconciletesting.NewInMemoryChannel(imcName, testNS), - // }, - // WantErr: true, - // WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - // Object: reconciletesting.NewInMemoryChannel(imcName, testNS, - // reconciletesting.WithInitInMemoryChannelConditions, - // reconciletesting.WithInMemoryChannelDeploymentReady(), - // reconciletesting.WithInMemoryChannelServiceReady(), - // reconciletesting.WithInMemoryChannelEndpointsNotReady("DispatcherEndpointsDoesNotExist", "Dispatcher Endpoints does not exist"), - // ), - // }}, - // WantEvents: []string{ - // Eventf(corev1.EventTypeWarning, "ReconcileFailed", "InMemoryChannel reconciliation failed: endpoints \"test-service\" not found"), - // }, - //}, { - // Name: "Endpoints not ready", - // Key: imcKey, - // Objects: []runtime.Object{ - // makeReadyDeployment(), - // makeService(), - // makeEmptyEndpoints(), - // reconciletesting.NewInMemoryChannel(imcName, testNS), - // }, - // WantErr: true, - // WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - // Object: reconciletesting.NewInMemoryChannel(imcName, testNS, - // reconciletesting.WithInitInMemoryChannelConditions, - // reconciletesting.WithInMemoryChannelDeploymentReady(), - // reconciletesting.WithInMemoryChannelServiceReady(), - // reconciletesting.WithInMemoryChannelEndpointsNotReady("DispatcherEndpointsNotReady", "There are no endpoints ready for Dispatcher service"), - // ), - // }}, - // WantEvents: []string{ - // Eventf(corev1.EventTypeWarning, "ReconcileFailed", "InMemoryChannel reconciliation 
failed: there are no endpoints ready for Dispatcher service"), - // }, - //}, { - // Name: "Works, creates new channel", - // Key: imcKey, - // Objects: []runtime.Object{ - // makeReadyDeployment(), - // makeService(), - // makeReadyEndpoints(), - // reconciletesting.NewInMemoryChannel(imcName, testNS), - // }, - // WantErr: false, - // WantCreates: []metav1.Object{ - // makeChannelService(reconciletesting.NewInMemoryChannel(imcName, testNS)), - // }, - // WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - // Object: reconciletesting.NewInMemoryChannel(imcName, testNS, - // reconciletesting.WithInitInMemoryChannelConditions, - // reconciletesting.WithInMemoryChannelDeploymentReady(), - // reconciletesting.WithInMemoryChannelServiceReady(), - // reconciletesting.WithInMemoryChannelEndpointsReady(), - // reconciletesting.WithInMemoryChannelChannelServiceReady(), - // reconciletesting.WithInMemoryChannelAddress(channelServiceAddress), - // ), - // }}, - // WantEvents: []string{ - // Eventf(corev1.EventTypeNormal, "Reconciled", "InMemoryChannel reconciled"), - // }, - //}, { - // Name: "Works, channel exists", - // Key: imcKey, - // Objects: []runtime.Object{ - // makeReadyDeployment(), - // makeService(), - // makeReadyEndpoints(), - // reconciletesting.NewInMemoryChannel(imcName, testNS), - // makeChannelService(reconciletesting.NewInMemoryChannel(imcName, testNS)), - // }, - // WantErr: false, - // WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - // Object: reconciletesting.NewInMemoryChannel(imcName, testNS, - // reconciletesting.WithInitInMemoryChannelConditions, - // reconciletesting.WithInMemoryChannelDeploymentReady(), - // reconciletesting.WithInMemoryChannelServiceReady(), - // reconciletesting.WithInMemoryChannelEndpointsReady(), - // reconciletesting.WithInMemoryChannelChannelServiceReady(), - // reconciletesting.WithInMemoryChannelAddress(channelServiceAddress), - // ), - // }}, - // WantEvents: []string{ - // Eventf(corev1.EventTypeNormal, 
"Reconciled", "InMemoryChannel reconciled"), - // }, - //}, { - // Name: "channel exists, not owned by us", - // Key: imcKey, - // Objects: []runtime.Object{ - // makeReadyDeployment(), - // makeService(), - // makeReadyEndpoints(), - // reconciletesting.NewInMemoryChannel(imcName, testNS), - // makeChannelServiceNotOwnedByUs(reconciletesting.NewInMemoryChannel(imcName, testNS)), - // }, - // WantErr: true, - // WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - // Object: reconciletesting.NewInMemoryChannel(imcName, testNS, - // reconciletesting.WithInitInMemoryChannelConditions, - // reconciletesting.WithInMemoryChannelDeploymentReady(), - // reconciletesting.WithInMemoryChannelServiceReady(), - // reconciletesting.WithInMemoryChannelEndpointsReady(), - // reconciletesting.WithInMemoryChannelChannelServicetNotReady("ChannelServiceFailed", "Channel Service failed: inmemorychannel: test-namespace/test-imc does not own Service: \"test-imc-kn-channel\""), - // ), - // }}, - // WantEvents: []string{ - // Eventf(corev1.EventTypeWarning, "ReconcileFailed", "InMemoryChannel reconciliation failed: inmemorychannel: test-namespace/test-imc does not own Service: \"test-imc-kn-channel\""), - // }, - //}, { - // Name: "channel does not exist, fails to create", - // Key: imcKey, - // Objects: []runtime.Object{ - // makeReadyDeployment(), - // makeService(), - // makeReadyEndpoints(), - // reconciletesting.NewInMemoryChannel(imcName, testNS), - // }, - // WantErr: true, - // WithReactors: []clientgotesting.ReactionFunc{ - // InduceFailure("create", "Services"), - // }, - // WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ - // Object: reconciletesting.NewInMemoryChannel(imcName, testNS, - // reconciletesting.WithInitInMemoryChannelConditions, - // reconciletesting.WithInMemoryChannelDeploymentReady(), - // reconciletesting.WithInMemoryChannelServiceReady(), - // reconciletesting.WithInMemoryChannelEndpointsReady(), - // 
reconciletesting.WithInMemoryChannelChannelServicetNotReady("ChannelServiceFailed", "Channel Service failed: inducing failure for create services"), - // ), - // }}, - // WantCreates: []metav1.Object{ - // makeChannelService(reconciletesting.NewInMemoryChannel(imcName, testNS)), - // }, - // WantEvents: []string{ - // Eventf(corev1.EventTypeWarning, "ReconcileFailed", "InMemoryChannel reconciliation failed: inducing failure for create services"), - // }, - //}, {}, - } - defer logtesting.ClearAll() - - table.Test(t, reconciletesting.MakeFactory(func(listers *reconciletesting.Listers, opt reconciler.Options) controller.Reconciler { - return &Reconciler{ - Base: reconciler.NewBase(opt, controllerAgentName), - dispatcherNamespace: testNS, - dispatcherDeploymentName: dispatcherDeploymentName, - dispatcherServiceName: dispatcherServiceName, - // TODO: Fix - kafkachannelLister: nil, - kafkachannelInformer: nil, - deploymentLister: listers.GetDeploymentLister(), - serviceLister: listers.GetServiceLister(), - endpointsLister: listers.GetEndpointsLister(), - } - }, - false, - )) -} - -func makeDeployment() *appsv1.Deployment { - return &appsv1.Deployment{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "apps/v1", - Kind: "Deployment", - }, - ObjectMeta: metav1.ObjectMeta{ - Namespace: testNS, - Name: dispatcherDeploymentName, - }, - Status: appsv1.DeploymentStatus{}, - } -} - -func makeReadyDeployment() *appsv1.Deployment { - d := makeDeployment() - d.Status.Conditions = []appsv1.DeploymentCondition{{Type: appsv1.DeploymentAvailable, Status: corev1.ConditionTrue}} - return d -} - -func makeService() *corev1.Service { - return &corev1.Service{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "v1", - Kind: "Service", - }, - ObjectMeta: metav1.ObjectMeta{ - Namespace: testNS, - Name: dispatcherServiceName, - }, - } -} - -func makeChannelService(kc *v1alpha1.KafkaChannel) *corev1.Service { - return &corev1.Service{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "v1", - Kind: "Service", - }, - 
ObjectMeta: metav1.ObjectMeta{ - Namespace: testNS, - Name: fmt.Sprintf("%s-kn-channel", kcName), - Labels: map[string]string{ - "messaging.knative.dev/role": "kafka-channel", - }, - OwnerReferences: []metav1.OwnerReference{ - *kmeta.NewControllerRef(kc), - }, - }, - Spec: corev1.ServiceSpec{ - Type: corev1.ServiceTypeExternalName, - ExternalName: fmt.Sprintf("%s.%s.svc.%s", dispatcherServiceName, testNS, utils.GetClusterDomainName()), - }, - } -} - -func makeChannelServiceNotOwnedByUs(kc *v1alpha1.KafkaChannel) *corev1.Service { - return &corev1.Service{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "v1", - Kind: "Service", - }, - ObjectMeta: metav1.ObjectMeta{ - Namespace: testNS, - Name: fmt.Sprintf("%s-kn-channel", kcName), - Labels: map[string]string{ - "messaging.knative.dev/role": "kafka-channel", - }, - }, - Spec: corev1.ServiceSpec{ - Type: corev1.ServiceTypeExternalName, - ExternalName: fmt.Sprintf("%s.%s.svc.%s", dispatcherServiceName, testNS, utils.GetClusterDomainName()), - }, - } -} - -func makeEmptyEndpoints() *corev1.Endpoints { - return &corev1.Endpoints{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "v1", - Kind: "Endpoints", - }, - ObjectMeta: metav1.ObjectMeta{ - Namespace: testNS, - Name: dispatcherServiceName, - }, - } -} - -func makeReadyEndpoints() *corev1.Endpoints { - e := makeEmptyEndpoints() - e.Subsets = []corev1.EndpointSubset{{Addresses: []corev1.EndpointAddress{{IP: "1.1.1.1"}}}} - return e -} From f21fc7369d1a404631f13d8862db90d39ef51478 Mon Sep 17 00:00:00 2001 From: Nacho Cano Date: Tue, 28 May 2019 18:25:46 -0700 Subject: [PATCH 62/64] update after merge --- contrib/kafka/pkg/reconciler/controller/kafkachannel.go | 2 +- contrib/kafka/pkg/reconciler/dispatcher/kafkachannel.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/contrib/kafka/pkg/reconciler/controller/kafkachannel.go b/contrib/kafka/pkg/reconciler/controller/kafkachannel.go index 1b1d83c659d..48b9416b80d 100644 --- 
a/contrib/kafka/pkg/reconciler/controller/kafkachannel.go +++ b/contrib/kafka/pkg/reconciler/controller/kafkachannel.go @@ -129,7 +129,7 @@ func NewController( r.impl = controller.NewImpl(r, r.Logger, ReconcilerName) r.Logger.Info("Setting up event handlers") - kafkachannelInformer.Informer().AddEventHandler(reconciler.Handler(r.impl.Enqueue)) + kafkachannelInformer.Informer().AddEventHandler(controller.HandleAll(r.impl.Enqueue)) // Set up watches for dispatcher resources we care about, since any changes to these // resources will affect our Channels. So, set up a watch here, that will cause diff --git a/contrib/kafka/pkg/reconciler/dispatcher/kafkachannel.go b/contrib/kafka/pkg/reconciler/dispatcher/kafkachannel.go index 877f4356a9c..9c2a54a4668 100644 --- a/contrib/kafka/pkg/reconciler/dispatcher/kafkachannel.go +++ b/contrib/kafka/pkg/reconciler/dispatcher/kafkachannel.go @@ -77,7 +77,7 @@ func NewController( r.Logger.Info("Setting up event handlers") // Watch for kafka channels. - kafkachannelInformer.Informer().AddEventHandler(reconciler.Handler(r.impl.Enqueue)) + kafkachannelInformer.Informer().AddEventHandler(controller.HandleAll(r.impl.Enqueue)) return r.impl } From 8f71f9fbc4075b381f3e78b2e0050a0f2bd70622 Mon Sep 17 00:00:00 2001 From: nachocano Date: Wed, 29 May 2019 14:02:33 -0700 Subject: [PATCH 63/64] adding the subscribable label --- contrib/kafka/config/300-kafka-channel.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/contrib/kafka/config/300-kafka-channel.yaml b/contrib/kafka/config/300-kafka-channel.yaml index 4838ad28833..90d9aec28b9 100644 --- a/contrib/kafka/config/300-kafka-channel.yaml +++ b/contrib/kafka/config/300-kafka-channel.yaml @@ -18,6 +18,7 @@ metadata: name: kafkachannels.messaging.knative.dev labels: knative.dev/crd-install: "true" + messaging.knative.dev/subscribable: "true" spec: group: messaging.knative.dev version: v1alpha1 From 21aa9ac3b90026c11fa839dee87ead274e6fcabe Mon Sep 17 00:00:00 2001 From: Nacho Cano Date: Fri, 
31 May 2019 07:56:55 -0700 Subject: [PATCH 64/64] renaming in trunk --- .../messaging/v1alpha1/kafka_channel_validation_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation_test.go b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation_test.go index ba378fb41b4..e5269ad70a5 100644 --- a/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation_test.go +++ b/contrib/kafka/pkg/apis/messaging/v1alpha1/kafka_channel_validation_test.go @@ -74,7 +74,7 @@ func TestKafkaChannelValidation(t *testing.T) { NumPartitions: 1, ReplicationFactor: 1, Subscribable: &eventingduck.Subscribable{ - Subscribers: []eventingduck.ChannelSubscriberSpec{{ + Subscribers: []eventingduck.SubscriberSpec{{ SubscriberURI: "subscriberendpoint", ReplyURI: "resultendpoint", }}, @@ -88,7 +88,7 @@ func TestKafkaChannelValidation(t *testing.T) { NumPartitions: 1, ReplicationFactor: 1, Subscribable: &eventingduck.Subscribable{ - Subscribers: []eventingduck.ChannelSubscriberSpec{{ + Subscribers: []eventingduck.SubscriberSpec{{ SubscriberURI: "subscriberendpoint", ReplyURI: "replyendpoint", }, {}}, @@ -106,7 +106,7 @@ func TestKafkaChannelValidation(t *testing.T) { NumPartitions: 1, ReplicationFactor: 1, Subscribable: &eventingduck.Subscribable{ - Subscribers: []eventingduck.ChannelSubscriberSpec{{}, {}}, + Subscribers: []eventingduck.SubscriberSpec{{}, {}}, }, }, },