From 8ea7c1349f56774866237ac57b9c428fb623fbbf Mon Sep 17 00:00:00 2001 From: Sabari Kumar Murugesan Date: Wed, 26 Sep 2018 18:21:35 -0700 Subject: [PATCH 01/20] Kafka Channel Provisioner Controllers --- .../kafka/clusterprovisioner.yaml | 22 ++ config/provisioners/kafka/clusterrole.yaml | 31 +++ .../kafka/clusterrolebinding.yaml | 26 ++ config/provisioners/kafka/controller.yaml | 34 +++ .../kafka/kafka-provisioner-config.yaml | 26 ++ .../provisioners/kafka/service-account.yaml | 19 ++ pkg/apis/eventing/v1alpha1/channel_types.go | 10 + .../v1alpha1/cluster_provisioner_types.go | 22 +- .../kafka/controller/channel/provider.go | 78 ++++++ .../kafka/controller/channel/reconcile.go | 147 +++++++++++ .../controller/channel/reconcile_test.go | 232 ++++++++++++++++++ pkg/provisioners/kafka/controller/provider.go | 78 ++++++ .../kafka/controller/reconcile.go | 123 ++++++++++ .../kafka/controller/reconcile_test.go | 174 +++++++++++++ pkg/provisioners/kafka/controller/util.go | 66 +++++ pkg/provisioners/kafka/main.go | 71 ++++++ 16 files changed, 1158 insertions(+), 1 deletion(-) create mode 100644 config/provisioners/kafka/clusterprovisioner.yaml create mode 100644 config/provisioners/kafka/clusterrole.yaml create mode 100644 config/provisioners/kafka/clusterrolebinding.yaml create mode 100644 config/provisioners/kafka/controller.yaml create mode 100644 config/provisioners/kafka/kafka-provisioner-config.yaml create mode 100644 config/provisioners/kafka/service-account.yaml create mode 100644 pkg/provisioners/kafka/controller/channel/provider.go create mode 100644 pkg/provisioners/kafka/controller/channel/reconcile.go create mode 100644 pkg/provisioners/kafka/controller/channel/reconcile_test.go create mode 100644 pkg/provisioners/kafka/controller/provider.go create mode 100644 pkg/provisioners/kafka/controller/reconcile.go create mode 100644 pkg/provisioners/kafka/controller/reconcile_test.go create mode 100644 pkg/provisioners/kafka/controller/util.go create mode 100644 
pkg/provisioners/kafka/main.go diff --git a/config/provisioners/kafka/clusterprovisioner.yaml b/config/provisioners/kafka/clusterprovisioner.yaml new file mode 100644 index 00000000000..202f9adcab4 --- /dev/null +++ b/config/provisioners/kafka/clusterprovisioner.yaml @@ -0,0 +1,22 @@ +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: eventing.knative.dev/v1alpha1 +kind: ClusterProvisioner +metadata: + name: kafka +spec: + reconciles: + group: eventing.knative.dev + kind: Channel \ No newline at end of file diff --git a/config/provisioners/kafka/clusterrole.yaml b/config/provisioners/kafka/clusterrole.yaml new file mode 100644 index 00000000000..3298a12f984 --- /dev/null +++ b/config/provisioners/kafka/clusterrole.yaml @@ -0,0 +1,31 @@ +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kafka-provisioner +rules: +- apiGroups: ["eventing.knative.dev"] + resources: ["clusterprovisioners"] + verbs: ["get", "watch", "list", "update", "patch"] +- apiGroups: ["eventing.knative.dev"] + resources: ["channels"] + verbs: ["get", "watch", "list", "update", "patch"] +- apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] +- apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "watch", "list"] \ No newline at end of file diff --git a/config/provisioners/kafka/clusterrolebinding.yaml b/config/provisioners/kafka/clusterrolebinding.yaml new file mode 100644 index 00000000000..d3789e784f8 --- /dev/null +++ b/config/provisioners/kafka/clusterrolebinding.yaml @@ -0,0 +1,26 @@ +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: kafka-provisioner-manage +subjects: + - kind: ServiceAccount + name: kafka-provisioner + namespace: knative-eventing +roleRef: + kind: ClusterRole + name: kafka-provisioner + apiGroup: rbac.authorization.k8s.io \ No newline at end of file diff --git a/config/provisioners/kafka/controller.yaml b/config/provisioners/kafka/controller.yaml new file mode 100644 index 00000000000..2bd28585745 --- /dev/null +++ b/config/provisioners/kafka/controller.yaml @@ -0,0 +1,34 @@ +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: apps/v1beta1 +kind: Deployment +metadata: + name: kafka-provisioner + namespace: knative-eventing +spec: + replicas: 1 + template: + metadata: + labels: + app: kafka-provisioner + spec: + serviceAccountName: kafka-provisioner + containers: + - name: kafka-provisioner-controller + image: github.com/knative/eventing/pkg/provisioners/kafka + args: [ + "-logtostderr", + "-stderrthreshold", "INFO", + ] \ No newline at end of file diff --git a/config/provisioners/kafka/kafka-provisioner-config.yaml b/config/provisioners/kafka/kafka-provisioner-config.yaml new file mode 100644 index 00000000000..9f67cf64852 --- /dev/null +++ b/config/provisioners/kafka/kafka-provisioner-config.yaml @@ -0,0 +1,26 @@ +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: v1 +kind: ConfigMap +metadata: + name: kafka-provisioner-config + namespace: knative-eventing +data: + # Name of the provisioner that this controller represents + provisioner-name: kafka + # Leave namespace empty string for cluster provisioner + provisioner-namespace: "" + # Broker URL's for the provisioner + brokers: kafkabroker.kafka:9092 \ No newline at end of file diff --git a/config/provisioners/kafka/service-account.yaml b/config/provisioners/kafka/service-account.yaml new file mode 100644 index 00000000000..bacb3e751e0 --- /dev/null +++ b/config/provisioners/kafka/service-account.yaml @@ -0,0 +1,19 @@ +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kafka-provisioner + namespace: knative-eventing \ No newline at end of file diff --git a/pkg/apis/eventing/v1alpha1/channel_types.go b/pkg/apis/eventing/v1alpha1/channel_types.go index 07c353123cd..092a6805dff 100644 --- a/pkg/apis/eventing/v1alpha1/channel_types.go +++ b/pkg/apis/eventing/v1alpha1/channel_types.go @@ -145,6 +145,16 @@ func (cs *ChannelStatus) SetConditions(conditions duckv1alpha1.Conditions) { cs.Conditions = conditions } +// InitializeConditions sets relevant unset conditions to Unknown state. +func (cs *ChannelStatus) InitializeConditions() { + chanCondSet.Manage(cs).InitializeConditions() +} + +// InitializeConditions sets relevant unset conditions to Unknown state. 
+func (cs *ChannelStatus) MarkAsNotProvisioned(reason, messageFormat string, messageA ...interface{}) { + chanCondSet.Manage(cs).MarkFalse(ChannelConditionProvisioned, reason, messageFormat, messageA...) +} + // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ChannelList is a collection of Channels. diff --git a/pkg/apis/eventing/v1alpha1/cluster_provisioner_types.go b/pkg/apis/eventing/v1alpha1/cluster_provisioner_types.go index 3be26f37d00..0d9f5c502c8 100644 --- a/pkg/apis/eventing/v1alpha1/cluster_provisioner_types.go +++ b/pkg/apis/eventing/v1alpha1/cluster_provisioner_types.go @@ -21,6 +21,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "encoding/json" + "github.com/knative/pkg/apis" "github.com/knative/pkg/apis/duck" duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1" @@ -74,7 +75,7 @@ type ClusterProvisionerSpec struct { Reconciles metav1.GroupKind `json:"reconciles"` } -var cProvCondSet = duckv1alpha1.NewLivingConditionSet() +var cProvCondSet = duckv1alpha1.NewLivingConditionSet(ClusterProvisionerConditionProvisionerReady) // ClusterProvisionerStatus is the status for a ClusterProvisioner resource type ClusterProvisionerStatus struct { @@ -90,6 +91,15 @@ type ClusterProvisionerStatus struct { ObservedGeneration int64 `json:"observedGeneration,omitempty"` } +const ( + + // ClusterProvisionerConditionReady has status True when all subconditions below have been set to True. 
+ ClusterProvisionerConditionReady = duckv1alpha1.ConditionReady + + // ClusterProvisionerConditionProvisionerReady has status True when the provisioner is ready + ClusterProvisionerConditionProvisionerReady duckv1alpha1.ConditionType = "ProvisionerReady" +) + // GetSpecJSON returns spec as json func (p *ClusterProvisioner) GetSpecJSON() ([]byte, error) { return json.Marshal(p.Spec) @@ -111,6 +121,16 @@ func (ps *ClusterProvisionerStatus) IsReady() bool { return cProvCondSet.Manage(ps).IsHappy() } +// MarkProvisionerReady sets the condition that the provisioner is ready to provision backing resource. +func (ps *ClusterProvisionerStatus) MarkProvisionerReady() { + cProvCondSet.Manage(ps).MarkTrue(ClusterProvisionerConditionProvisionerReady) +} + +// MarkProvisionerNotReady sets the condition that the provisioner is not ready to provision backing resource. +func (ps *ClusterProvisionerStatus) MarkProvisionerNotReady(reason, messageFormat string, messageA ...interface{}) { + cProvCondSet.Manage(ps).MarkFalse(ClusterProvisionerConditionProvisionerReady, reason, messageFormat, messageA...) +} + // InitializeConditions sets relevant unset conditions to Unknown state. func (ps *ClusterProvisionerStatus) InitializeConditions() { cProvCondSet.Manage(ps).InitializeConditions() diff --git a/pkg/provisioners/kafka/controller/channel/provider.go b/pkg/provisioners/kafka/controller/channel/provider.go new file mode 100644 index 00000000000..61b51ff3e45 --- /dev/null +++ b/pkg/provisioners/kafka/controller/channel/provider.go @@ -0,0 +1,78 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package channel + +import ( + "github.com/go-logr/logr" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" + + "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" +) + +const ( + // controllerAgentName is the string used by this controller to identify + // itself when creating events. + controllerAgentName = "kafka-provisioner-channel-controller" +) + +type reconciler struct { + client client.Client + restConfig *rest.Config + recorder record.EventRecorder + log logr.Logger +} + +// Verify the struct implements reconcile.Reconciler +var _ reconcile.Reconciler = &reconciler{} + +// ProvideController returns a Channel controller. +func ProvideController(mgr manager.Manager, log logr.Logger) (controller.Controller, error) { + // Setup a new controller to Reconcile Channel. + c, err := controller.New(controllerAgentName, mgr, controller.Options{ + Reconciler: &reconciler{ + recorder: mgr.GetRecorder(controllerAgentName), + log: log, + }, + }) + if err != nil { + return nil, err + } + + // Watch Channel events and enqueue Channel object key. 
+ if err := c.Watch(&source.Kind{Type: &v1alpha1.Channel{}}, &handler.EnqueueRequestForObject{}); err != nil { + return nil, err + } + + return c, nil +} + +func (r *reconciler) InjectClient(c client.Client) error { + r.client = c + return nil +} + +func (r *reconciler) InjectConfig(c *rest.Config) error { + r.restConfig = c + return nil +} diff --git a/pkg/provisioners/kafka/controller/channel/reconcile.go b/pkg/provisioners/kafka/controller/channel/reconcile.go new file mode 100644 index 00000000000..dcf0423a725 --- /dev/null +++ b/pkg/provisioners/kafka/controller/channel/reconcile.go @@ -0,0 +1,147 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package channel + +import ( + "context" + "fmt" + + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" + "github.com/knative/eventing/pkg/provisioners/kafka/controller" +) + +// Reconcile compares the actual state with the desired, and attempts to +// converge the two. It then updates the Status block of the Channel resource +// with the current status of the resource. 
+func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, error) { + r.log.Info("Reconciling channel", "request", request) + channel := &v1alpha1.Channel{} + err := r.client.Get(context.TODO(), request.NamespacedName, channel) + + if errors.IsNotFound(err) { + r.log.Info("could not find channel", "request", request) + return reconcile.Result{}, nil + } + + if err != nil { + r.log.Error(err, "could not fetch Channel", "request", request) + return reconcile.Result{}, err + } + + original := channel.DeepCopy() + + // Reconcile this copy of the Channel and then write back any status + // updates regardless of whether the reconcile error out. + err = r.reconcile(channel) + if !equality.Semantic.DeepEqual(original.Status, channel.Status) { + // If we didn't change anything then don't call updateStatus. + // This is important because the copy we loaded from the informer's + // cache may be stale and we don't want to overwrite a prior update + // to status with this stale state. 
+ if _, err := r.updateStatus(channel); err != nil { + r.log.Info("Failed to update channel status", "error", err) + return reconcile.Result{}, err + } + } + + // Requeue if the resource is not ready: + return reconcile.Result{}, err +} + +func (r *reconciler) reconcile(channel *v1alpha1.Channel) error { + // See if the channel has been deleted + accessor, err := meta.Accessor(channel) + if err != nil { + r.log.Info("failed to get metadata", "error", err) + return err + } + deletionTimestamp := accessor.GetDeletionTimestamp() + if deletionTimestamp != nil { + r.log.Info(fmt.Sprintf("DeletionTimestamp: %v", deletionTimestamp)) + //TODO: Handle deletion + return nil + } + + // Skip Channel as it is not targeting any provisioner + if channel.Spec.Provisioner == nil || channel.Spec.Provisioner.Ref == nil { + return nil + } + + // Skip channel not managed by this provisioner + provisionerRef := channel.Spec.Provisioner.Ref + clusterProvisioner, err := r.getClusterProvisioner() + if err != nil { + return err + } + // TODO: Is there a better way to compare ref? 
+ if provisionerRef.Name != clusterProvisioner.Name || provisionerRef.Namespace != clusterProvisioner.Namespace { + return nil + } + + // The provisioner must be ready + if !clusterProvisioner.Status.IsReady() { + r.log.Info("provisioner is not ready", "provisioner", clusterProvisioner) + return nil + } + + // TODO: provision channel + channel.Status.InitializeConditions() + channel.Status.MarkAsNotProvisioned("NotProvisioned", "NotImplemented") + return nil +} + +func (r *reconciler) getClusterProvisioner() (*v1alpha1.ClusterProvisioner, error) { + config, err := controller.GetProvisionerConfig(r.client) + if err != nil { + return nil, err + } + clusterProvisioner := &v1alpha1.ClusterProvisioner{} + objKey := client.ObjectKey{ + Namespace: config.Namespace, + Name: config.Name, + } + if err = r.client.Get(context.TODO(), objKey, clusterProvisioner); err != nil { + return nil, err + } + return clusterProvisioner, nil + +} + +func (r *reconciler) updateStatus(channel *v1alpha1.Channel) (*v1alpha1.Channel, error) { + newChannel := &v1alpha1.Channel{} + err := r.client.Get(context.TODO(), client.ObjectKey{Namespace: channel.Namespace, Name: channel.Name}, newChannel) + + if err != nil { + return nil, err + } + newChannel.Status = channel.Status + + // Until #38113 is merged, we must use Update instead of UpdateStatus to + // update the Status block of the Channel resource. UpdateStatus will not + // allow changes to the Spec of the resource, which is ideal for ensuring + // nothing other than resource status has been updated. 
+ if err = r.client.Update(context.TODO(), newChannel); err != nil { + return nil, err + } + return newChannel, nil +} diff --git a/pkg/provisioners/kafka/controller/channel/reconcile_test.go b/pkg/provisioners/kafka/controller/channel/reconcile_test.go new file mode 100644 index 00000000000..7781765170f --- /dev/null +++ b/pkg/provisioners/kafka/controller/channel/reconcile_test.go @@ -0,0 +1,232 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Veroute.on 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package channel + +import ( + "fmt" + "testing" + + duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" + + "github.com/knative/eventing/pkg/apis/eventing" + eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" + controllertesting "github.com/knative/eventing/pkg/controller/testing" + "github.com/knative/eventing/pkg/provisioners/kafka/controller" + "github.com/knative/eventing/pkg/system" +) + +var ( + log = logf.Log.WithName("testing") +) + +const ( + channelName = "test-channel" + clusterProvisionerName = "kafka" + testNS = "test-namespace" +) + +func init() { + // Add types to scheme + eventingv1alpha1.AddToScheme(scheme.Scheme) +} + +var testCases = 
[]controllertesting.TestCase{ + { + Name: "new channel with valid provisioner: adds not provisioned status", + InitialState: []runtime.Object{ + getNewClusterProvisioner(clusterProvisionerName), + getNewChannel(channelName, clusterProvisionerName), + getControllerConfigMap(), + }, + ReconcileKey: fmt.Sprintf("%s/%s", testNS, channelName), + WantResult: reconcile.Result{}, + WantPresent: []runtime.Object{ + getNewChannelUnknownStatus(channelName, clusterProvisionerName), + }, + IgnoreTimes: true, + }, + { + Name: "new channel with missing provisioner: error", + InitialState: []runtime.Object{ + getNewChannel(channelName, clusterProvisionerName), + getControllerConfigMap(), + }, + ReconcileKey: fmt.Sprintf("%s/%s", testNS, channelName), + WantResult: reconcile.Result{}, + WantErrMsg: "clusterprovisioners.eventing.knative.dev \"" + clusterProvisionerName + "\" not found", + IgnoreTimes: true, + }, + { + Name: "new channel with provisioner not managed by this controller: skips channel", + InitialState: []runtime.Object{ + getNewChannel(channelName, "not-our-provisioner"), + getNewClusterProvisioner("not-our-provisioner"), + getNewClusterProvisioner(clusterProvisionerName), + getControllerConfigMap(), + }, + ReconcileKey: fmt.Sprintf("%s/%s", testNS, channelName), + WantResult: reconcile.Result{}, + WantPresent: []runtime.Object{ + getNewChannel(channelName, "not-our-provisioner"), + }, + IgnoreTimes: true, + }, + { + Name: "new channel with missing provisioner reference: skips channel", + InitialState: []runtime.Object{ + getNewChannelNoProvisioner(channelName), + getControllerConfigMap(), + }, + ReconcileKey: fmt.Sprintf("%s/%s", testNS, channelName), + WantResult: reconcile.Result{}, + WantPresent: []runtime.Object{ + getNewChannelNoProvisioner(channelName), + }, + IgnoreTimes: true, + }, +} + +func TestAllCases(t *testing.T) { + recorder := record.NewBroadcaster().NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerAgentName}) + + for _, tc := range 
testCases { + c := tc.GetClient() + r := &reconciler{ + client: c, + restConfig: &rest.Config{}, + recorder: recorder, + log: log, + } + t.Logf("Running test %s", tc.Name) + t.Run(tc.Name, tc.Runner(t, r, c)) + } +} + +func getNewChannelNoProvisioner(name string) *eventingv1alpha1.Channel { + channel := &eventingv1alpha1.Channel{ + TypeMeta: channelType(), + ObjectMeta: om(testNS, name), + Spec: eventingv1alpha1.ChannelSpec{}, + } + // selflink is not filled in when we create the object, so clear it + channel.ObjectMeta.SelfLink = "" + return channel +} + +func getNewChannel(name, provisioner string) *eventingv1alpha1.Channel { + channel := &eventingv1alpha1.Channel{ + TypeMeta: channelType(), + ObjectMeta: om(testNS, name), + Spec: eventingv1alpha1.ChannelSpec{ + Provisioner: &eventingv1alpha1.ProvisionerReference{ + Ref: &corev1.ObjectReference{ + Name: provisioner, + Kind: "ClusterProvisioner", + APIVersion: eventingv1alpha1.SchemeGroupVersion.String(), + }, + }, + }, + } + // selflink is not filled in when we create the object, so clear it + channel.ObjectMeta.SelfLink = "" + return channel +} + +func getNewChannelUnknownStatus(name, provisioner string) *eventingv1alpha1.Channel { + c := getNewChannel(name, provisioner) + c.Status = eventingv1alpha1.ChannelStatus{ + Conditions: []duckv1alpha1.Condition{ + { + Type: eventingv1alpha1.ChannelConditionProvisioned, + Status: corev1.ConditionFalse, + Reason: "NotProvisioned", + Message: "NotImplemented"}, + { + Type: eventingv1alpha1.ChannelConditionReady, + Status: corev1.ConditionFalse, + Reason: "NotProvisioned", + Message: "NotImplemented", + }, + }, + } + return c +} + +func channelType() metav1.TypeMeta { + return metav1.TypeMeta{ + APIVersion: eventingv1alpha1.SchemeGroupVersion.String(), + Kind: "Channel", + } +} + +func getNewClusterProvisioner(name string) *eventingv1alpha1.ClusterProvisioner { + clusterProvisioner := &eventingv1alpha1.ClusterProvisioner{ + TypeMeta: metav1.TypeMeta{ + APIVersion: 
eventingv1alpha1.SchemeGroupVersion.String(), + Kind: "ClusterProvisioner", + }, + ObjectMeta: om("", name), + Spec: eventingv1alpha1.ClusterProvisionerSpec{ + Reconciles: metav1.GroupKind{ + Kind: "Channel", + Group: eventing.GroupName, + }, + }, + Status: eventingv1alpha1.ClusterProvisionerStatus{ + Conditions: []duckv1alpha1.Condition{ + { + Type: eventingv1alpha1.ClusterProvisionerConditionProvisionerReady, + Status: corev1.ConditionTrue, + }, + { + Type: eventingv1alpha1.ClusterProvisionerConditionReady, + Status: corev1.ConditionTrue, + }, + }, + }, + } + // selflink is not filled in when we create the object, so clear it + clusterProvisioner.ObjectMeta.SelfLink = "" + return clusterProvisioner +} + +func om(namespace, name string) metav1.ObjectMeta { + return metav1.ObjectMeta{ + Namespace: namespace, + Name: name, + SelfLink: fmt.Sprintf("/apis/eventing/v1alpha1/namespaces/%s/object/%s", namespace, name), + } +} + +func getControllerConfigMap() *corev1.ConfigMap { + return &corev1.ConfigMap{ + ObjectMeta: om(system.Namespace, controller.ControllerConfigMapName), + Data: map[string]string{ + controller.ProvisionerNameConfigMapKey: clusterProvisionerName, + controller.ProvisionerNamespaceConfigMapKey: "", + controller.BrokerConfigMapKey: "test-broker", + }, + } +} diff --git a/pkg/provisioners/kafka/controller/provider.go b/pkg/provisioners/kafka/controller/provider.go new file mode 100644 index 00000000000..fb73c4a9fc4 --- /dev/null +++ b/pkg/provisioners/kafka/controller/provider.go @@ -0,0 +1,78 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "github.com/go-logr/logr" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" + + "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" +) + +const ( + // controllerAgentName is the string used by this controller to identify + // itself when creating events. + controllerAgentName = "kafka-provisioner-controller" +) + +type reconciler struct { + client client.Client + restConfig *rest.Config + recorder record.EventRecorder + log logr.Logger +} + +// Verify the struct implements reconcile.Reconciler +var _ reconcile.Reconciler = &reconciler{} + +// ProvideController returns a Provisioner controller. +func ProvideController(mgr manager.Manager, log logr.Logger) (controller.Controller, error) { + // Setup a new controller to Reconcile Provisioners. + c, err := controller.New(controllerAgentName, mgr, controller.Options{ + Reconciler: &reconciler{ + recorder: mgr.GetRecorder(controllerAgentName), + log: log, + }, + }) + if err != nil { + return nil, err + } + + // Watch ClusterProvisioner events and enqueue ClusterProvisioner object key. 
+ if err := c.Watch(&source.Kind{Type: &v1alpha1.ClusterProvisioner{}}, &handler.EnqueueRequestForObject{}); err != nil { + return nil, err + } + + return c, nil +} + +func (r *reconciler) InjectClient(c client.Client) error { + r.client = c + return nil +} + +func (r *reconciler) InjectConfig(c *rest.Config) error { + r.restConfig = c + return nil +} diff --git a/pkg/provisioners/kafka/controller/reconcile.go b/pkg/provisioners/kafka/controller/reconcile.go new file mode 100644 index 00000000000..fbd01f0eaa6 --- /dev/null +++ b/pkg/provisioners/kafka/controller/reconcile.go @@ -0,0 +1,123 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + "fmt" + + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/knative/eventing/pkg/apis/eventing" + "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" +) + +// Reconcile compares the actual state with the desired, and attempts to +// converge the two. It then updates the Status block of the Provisioner resource +// with the current status of the resource. 
+func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, error) { + r.log.Info("reconciling ClusterProvisioner", "request", request) + provisioner := &v1alpha1.ClusterProvisioner{} + err := r.client.Get(context.TODO(), request.NamespacedName, provisioner) + + if errors.IsNotFound(err) { + r.log.Info("could not find ClusterProvisioner", "request", request) + return reconcile.Result{}, nil + } + + if err != nil { + r.log.Error(err, "could not fetch ClusterProvisioner", "request", request) + return reconcile.Result{}, err + } + + original := provisioner.DeepCopy() + + // Reconcile this copy of the Provisioner and then write back any status + // updates regardless of whether the reconcile error out. + err = r.reconcile(provisioner) + if !equality.Semantic.DeepEqual(original.Status, provisioner.Status) { + // If we didn't change anything then don't call updateStatus. + // This is important because the copy we loaded from the informer's + // cache may be stale and we don't want to overwrite a prior update + // to status with this stale state. 
+ if _, err := r.updateStatus(provisioner); err != nil { + r.log.Info("failed to update Provisioner status", "error", err) + return reconcile.Result{}, err + } + } + + // Requeue if the resource is not ready: + return reconcile.Result{}, err +} + +func (r *reconciler) reconcile(provisioner *v1alpha1.ClusterProvisioner) error { + // See if the provisioner has been deleted + accessor, err := meta.Accessor(provisioner) + if err != nil { + r.log.Info("failed to get metadata", "error", err) + return err + } + deletionTimestamp := accessor.GetDeletionTimestamp() + if deletionTimestamp != nil { + r.log.Info(fmt.Sprintf("DeletionTimestamp: %v", deletionTimestamp)) + return nil + } + + // Only reconcile channel provisioners + if provisioner.Spec.Reconciles.Group != eventing.GroupName || provisioner.Spec.Reconciles.Kind != "Channel" { + return nil + } + + config, err := GetProvisionerConfig(r.client) + if err != nil { + return err + } + + // Skip channel provisioners that we don't manage + if provisioner.Name != config.Name || provisioner.Namespace != config.Namespace { + return nil + } + + provisioner.Status.InitializeConditions() + // Update Status as Ready + provisioner.Status.MarkProvisionerReady() + + return nil +} + +func (r *reconciler) updateStatus(provisioner *v1alpha1.ClusterProvisioner) (*v1alpha1.ClusterProvisioner, error) { + newProvisioner := &v1alpha1.ClusterProvisioner{} + err := r.client.Get(context.TODO(), client.ObjectKey{Namespace: provisioner.Namespace, Name: provisioner.Name}, newProvisioner) + + if err != nil { + return nil, err + } + newProvisioner.Status = provisioner.Status + + // Until #38113 is merged, we must use Update instead of UpdateStatus to + // update the Status block of the Provisioner resource. UpdateStatus will not + // allow changes to the Spec of the resource, which is ideal for ensuring + // nothing other than resource status has been updated. 
+ if err = r.client.Update(context.TODO(), newProvisioner); err != nil { + return nil, err + } + return newProvisioner, nil +} diff --git a/pkg/provisioners/kafka/controller/reconcile_test.go b/pkg/provisioners/kafka/controller/reconcile_test.go new file mode 100644 index 00000000000..2e3f0e341a8 --- /dev/null +++ b/pkg/provisioners/kafka/controller/reconcile_test.go @@ -0,0 +1,174 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "fmt" + "testing" + + duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" + + "github.com/knative/eventing/pkg/apis/eventing" + eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" + controllertesting "github.com/knative/eventing/pkg/controller/testing" + "github.com/knative/eventing/pkg/system" +) + +var ( + log = logf.Log.WithName("testing") +) + +const ( + clusterProvisionerName = "kafka" + testNS = "" +) + +func init() { + // Add types to scheme + eventingv1alpha1.AddToScheme(scheme.Scheme) +} + +var ClusterProvisionerConditionReady = duckv1alpha1.Condition{ + Type: eventingv1alpha1.ClusterProvisionerConditionReady, + Status:
corev1.ConditionTrue, +} + +var ClusterProvisionerConditionProvisionerReady = duckv1alpha1.Condition{ + Type: eventingv1alpha1.ClusterProvisionerConditionProvisionerReady, + Status: corev1.ConditionTrue, +} + +var testCases = []controllertesting.TestCase{ + { + Name: "new channel clusterprovisioner: adds status", + InitialState: []runtime.Object{ + GetNewChannelClusterProvisioner(clusterProvisionerName), + getControllerConfigMap(), + }, + ReconcileKey: fmt.Sprintf("%s/%s", testNS, clusterProvisionerName), + WantResult: reconcile.Result{}, + WantPresent: []runtime.Object{ + GetNewChannelClusterProvisionerReady(clusterProvisionerName), + }, + IgnoreTimes: true, + }, + { + Name: "reconciles only channel kind", + InitialState: []runtime.Object{ + getNewClusterProvisioner(clusterProvisionerName, "Source"), + getControllerConfigMap(), + }, + ReconcileKey: fmt.Sprintf("%s/%s", testNS, clusterProvisionerName), + WantResult: reconcile.Result{}, + WantPresent: []runtime.Object{ + getNewClusterProvisioner(clusterProvisionerName, "Source"), + }, + }, + { + Name: "reconciles only associated provisioner", + InitialState: []runtime.Object{ + GetNewChannelClusterProvisioner("not-default-provisioner"), + getControllerConfigMap(), + }, + ReconcileKey: fmt.Sprintf("%s/%s", testNS, "not-default-provisioner"), + WantResult: reconcile.Result{}, + WantPresent: []runtime.Object{ + GetNewChannelClusterProvisioner("not-default-provisioner"), + }, + }, +} + +func TestAllCases(t *testing.T) { + recorder := record.NewBroadcaster().NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerAgentName}) + + for _, tc := range testCases { + c := tc.GetClient() + r := &reconciler{ + client: c, + restConfig: &rest.Config{}, + recorder: recorder, + log: log, + } + t.Logf("Running test %s", tc.Name) + t.Run(tc.Name, tc.Runner(t, r, c)) + } +} + +func GetNewChannelClusterProvisioner(name string) *eventingv1alpha1.ClusterProvisioner { + return getNewClusterProvisioner(name, "Channel") +} + +func 
getNewClusterProvisioner(name string, reconcileKind string) *eventingv1alpha1.ClusterProvisioner { + clusterProvisioner := &eventingv1alpha1.ClusterProvisioner{ + TypeMeta: ClusterProvisonerType(), + ObjectMeta: om(testNS, name), + Spec: eventingv1alpha1.ClusterProvisionerSpec{ + Reconciles: metav1.GroupKind{ + Kind: reconcileKind, + Group: eventing.GroupName, + }, + }, + } + // selflink is not filled in when we create the object, so clear it + clusterProvisioner.ObjectMeta.SelfLink = "" + return clusterProvisioner +} + +func GetNewChannelClusterProvisionerReady(name string) *eventingv1alpha1.ClusterProvisioner { + c := GetNewChannelClusterProvisioner(name) + c.Status = eventingv1alpha1.ClusterProvisionerStatus{ + Conditions: []duckv1alpha1.Condition{ + ClusterProvisionerConditionProvisionerReady, + ClusterProvisionerConditionReady, + }, + } + return c +} + +func ClusterProvisonerType() metav1.TypeMeta { + return metav1.TypeMeta{ + APIVersion: eventingv1alpha1.SchemeGroupVersion.String(), + Kind: "ClusterProvisioner", + } +} + +func om(namespace, name string) metav1.ObjectMeta { + return metav1.ObjectMeta{ + Namespace: namespace, + Name: name, + SelfLink: fmt.Sprintf("/apis/eventing/v1alpha1/namespaces/%s/object/%s", namespace, name), + } +} + +func getControllerConfigMap() *corev1.ConfigMap { + return &corev1.ConfigMap{ + ObjectMeta: om(system.Namespace, ControllerConfigMapName), + Data: map[string]string{ + ProvisionerNameConfigMapKey: clusterProvisionerName, + ProvisionerNamespaceConfigMapKey: "", + BrokerConfigMapKey: "test-broker", + }, + } +} diff --git a/pkg/provisioners/kafka/controller/util.go b/pkg/provisioners/kafka/controller/util.go new file mode 100644 index 00000000000..71e6ff7a10d --- /dev/null +++ b/pkg/provisioners/kafka/controller/util.go @@ -0,0 +1,66 @@ +package controller + +import ( + "context" + "fmt" + "strings" + + corev1 "k8s.io/api/core/v1" + runtimeClient "sigs.k8s.io/controller-runtime/pkg/client" + + 
"github.com/knative/eventing/pkg/system" +) + +const ( + // controllerConfigMapName is the name of the configmap in the eventing + // namespace that holds the configuration for this controller. + ControllerConfigMapName = "kafka-provisioner-config" + + ProvisionerNameConfigMapKey = "provisioner-name" + ProvisionerNamespaceConfigMapKey = "provisioner-namespace" + BrokerConfigMapKey = "brokers" +) + +type KafkaProvisionerConfig struct { + Name string + Namespace string + Brokers []string +} + +// GetProvisionerConfig returns the details of the associated Provisioner/ClusterProvisioner object +func GetProvisionerConfig(client runtimeClient.Client) (*KafkaProvisionerConfig, error) { + configMapKey := runtimeClient.ObjectKey{ + Namespace: system.Namespace, + Name: ControllerConfigMapName, + } + + configMap := &corev1.ConfigMap{} + if err := client.Get(context.TODO(), configMapKey, configMap); err != nil { + return nil, err + } + + config := &KafkaProvisionerConfig{} + + if value, ok := configMap.Data[ProvisionerNameConfigMapKey]; ok { + config.Name = value + } else { + return nil, fmt.Errorf("missing key %s in config map %s", ProvisionerNameConfigMapKey, ControllerConfigMapName) + } + + if value, ok := configMap.Data[ProvisionerNamespaceConfigMapKey]; ok { + config.Namespace = value + } else { + return nil, fmt.Errorf("missing key %s in config map %s", ProvisionerNamespaceConfigMapKey, ControllerConfigMapName) + } + + if value, ok := configMap.Data[BrokerConfigMapKey]; ok { + brokers := strings.Split(value, ",") + if len(brokers) == 0 { + return nil, fmt.Errorf("missing kafka brokers in configmap %s", ControllerConfigMapName) + } + config.Brokers = brokers + return config, nil + } + + return nil, fmt.Errorf("missing key %s in config map %s", BrokerConfigMapKey, ControllerConfigMapName) +} diff --git a/pkg/provisioners/kafka/main.go b/pkg/provisioners/kafka/main.go new file mode 100644 index 00000000000..6fdcfbb063e --- /dev/null +++ b/pkg/provisioners/kafka/main.go @@ 
-0,0 +1,71 @@ +package main + +import ( + "flag" + "os" + + "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/runtime" + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + "sigs.k8s.io/controller-runtime/pkg/client/config" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/manager" + logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" + "sigs.k8s.io/controller-runtime/pkg/runtime/signals" + + "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" + provisionerController "github.com/knative/eventing/pkg/provisioners/kafka/controller" + "github.com/knative/eventing/pkg/provisioners/kafka/controller/channel" +) + +var log = logf.Log.WithName("kafka-provisioner") + +// SchemeFunc adds types to a Scheme. +type SchemeFunc func(*runtime.Scheme) error + +// ProvideFunc adds a controller to a Manager. +type ProvideFunc func(mgr manager.Manager, log logr.Logger) (controller.Controller, error) + +// controllerRuntimeStart runs controllers written for controller-runtime. It's +// intended to be called from main(). Any controllers migrated to use +// controller-runtime should move their initialization to this function. +func controllerRuntimeStart() error { + // Setup a Manager + mgr, err := manager.New(config.GetConfigOrDie(), manager.Options{}) + if err != nil { + return err + } + + // Add custom types to this array to get them into the manager's scheme. + schemeFuncs := []SchemeFunc{ + v1alpha1.AddToScheme, + } + for _, schemeFunc := range schemeFuncs { + schemeFunc(mgr.GetScheme()) + } + + // Add each controller's ProvideController func to this list to have the + // manager run it. 
+ providers := []ProvideFunc{ + provisionerController.ProvideController, + channel.ProvideController, + } + + for _, provider := range providers { + if _, err := provider(mgr, log); err != nil { + return err + } + } + + return mgr.Start(signals.SetupSignalHandler()) +} + +func main() { + flag.Parse() + logf.SetLogger(logf.ZapLogger(false)) + entryLog := log.WithName("entrypoint") + if err := controllerRuntimeStart(); err != nil { + entryLog.Error(err, "unable to run controller manager") + os.Exit(1) + } +} From 66375547f16ec0f937e0f07b7e3376ab6faa0123 Mon Sep 17 00:00:00 2001 From: Sabari Kumar Murugesan Date: Thu, 27 Sep 2018 22:57:17 -0700 Subject: [PATCH 02/20] Remove controllerRuntimeStart and address PR comments --- .../kafka/kafka-provisioner-config.yaml | 4 +- .../kafka/controller/channel/reconcile.go | 7 ++- .../controller/channel/reconcile_test.go | 47 ++++++++++++++----- .../kafka/controller/reconcile.go | 2 +- .../kafka/controller/reconcile_test.go | 5 +- pkg/provisioners/kafka/controller/util.go | 20 +++----- pkg/provisioners/kafka/main.go | 27 ++++------- 7 files changed, 57 insertions(+), 55 deletions(-) diff --git a/config/provisioners/kafka/kafka-provisioner-config.yaml b/config/provisioners/kafka/kafka-provisioner-config.yaml index 9f67cf64852..cf27fe525fd 100644 --- a/config/provisioners/kafka/kafka-provisioner-config.yaml +++ b/config/provisioners/kafka/kafka-provisioner-config.yaml @@ -19,8 +19,6 @@ metadata: namespace: knative-eventing data: # Name of the provisioner that this controller represents - provisioner-name: kafka - # Leave namespace empty string for cluster provisioner - provisioner-namespace: "" + cluster-provisioner-name: kafka # Broker URL's for the provisioner brokers: kafkabroker.kafka:9092 \ No newline at end of file diff --git a/pkg/provisioners/kafka/controller/channel/reconcile.go b/pkg/provisioners/kafka/controller/channel/reconcile.go index dcf0423a725..ab631cac54a 100644 --- 
a/pkg/provisioners/kafka/controller/channel/reconcile.go +++ b/pkg/provisioners/kafka/controller/channel/reconcile.go @@ -100,8 +100,8 @@ func (r *reconciler) reconcile(channel *v1alpha1.Channel) error { // The provisioner must be ready if !clusterProvisioner.Status.IsReady() { - r.log.Info("provisioner is not ready", "provisioner", clusterProvisioner) - return nil + channel.Status.MarkAsNotProvisioned("NotProvisioned", "ClusterProvisioner %s is not ready", clusterProvisioner.Name) + return fmt.Errorf("ClusterProvisioner %s is not ready", clusterProvisioner.Name) } // TODO: provision channel @@ -117,8 +117,7 @@ func (r *reconciler) getClusterProvisioner() (*v1alpha1.ClusterProvisioner, erro } clusterProvisioner := &v1alpha1.ClusterProvisioner{} objKey := client.ObjectKey{ - Namespace: config.Namespace, - Name: config.Name, + Name: config.Name, } if err = r.client.Get(context.TODO(), objKey, clusterProvisioner); err != nil { return nil, err diff --git a/pkg/provisioners/kafka/controller/channel/reconcile_test.go b/pkg/provisioners/kafka/controller/channel/reconcile_test.go index 7781765170f..f1e95c288ab 100644 --- a/pkg/provisioners/kafka/controller/channel/reconcile_test.go +++ b/pkg/provisioners/kafka/controller/channel/reconcile_test.go @@ -56,14 +56,30 @@ var testCases = []controllertesting.TestCase{ { Name: "new channel with valid provisioner: adds not provisioned status", InitialState: []runtime.Object{ - getNewClusterProvisioner(clusterProvisionerName), + getNewClusterProvisioner(clusterProvisionerName, true), getNewChannel(channelName, clusterProvisionerName), getControllerConfigMap(), }, ReconcileKey: fmt.Sprintf("%s/%s", testNS, channelName), WantResult: reconcile.Result{}, WantPresent: []runtime.Object{ - getNewChannelUnknownStatus(channelName, clusterProvisionerName), + getNewChannelNotProvisionedStatus(channelName, clusterProvisionerName, "NotImplemented"), + }, + IgnoreTimes: true, + }, + { + Name: "new channel with provisioner not ready: error", + 
InitialState: []runtime.Object{ + getNewClusterProvisioner(clusterProvisionerName, false), + getNewChannel(channelName, clusterProvisionerName), + getControllerConfigMap(), + }, + ReconcileKey: fmt.Sprintf("%s/%s", testNS, channelName), + WantResult: reconcile.Result{}, + WantErrMsg: "ClusterProvisioner " + clusterProvisionerName + " is not ready", + WantPresent: []runtime.Object{ + getNewChannelNotProvisionedStatus(channelName, clusterProvisionerName, + "ClusterProvisioner "+clusterProvisionerName+" is not ready"), }, IgnoreTimes: true, }, @@ -82,8 +98,8 @@ var testCases = []controllertesting.TestCase{ Name: "new channel with provisioner not managed by this controller: skips channel", InitialState: []runtime.Object{ getNewChannel(channelName, "not-our-provisioner"), - getNewClusterProvisioner("not-our-provisioner"), - getNewClusterProvisioner(clusterProvisionerName), + getNewClusterProvisioner("not-our-provisioner", true), + getNewClusterProvisioner(clusterProvisionerName, true), getControllerConfigMap(), }, ReconcileKey: fmt.Sprintf("%s/%s", testNS, channelName), @@ -154,7 +170,7 @@ func getNewChannel(name, provisioner string) *eventingv1alpha1.Channel { return channel } -func getNewChannelUnknownStatus(name, provisioner string) *eventingv1alpha1.Channel { +func getNewChannelNotProvisionedStatus(name, provisioner, msg string) *eventingv1alpha1.Channel { c := getNewChannel(name, provisioner) c.Status = eventingv1alpha1.ChannelStatus{ Conditions: []duckv1alpha1.Condition{ @@ -162,12 +178,12 @@ func getNewChannelUnknownStatus(name, provisioner string) *eventingv1alpha1.Chan Type: eventingv1alpha1.ChannelConditionProvisioned, Status: corev1.ConditionFalse, Reason: "NotProvisioned", - Message: "NotImplemented"}, + Message: msg}, { Type: eventingv1alpha1.ChannelConditionReady, Status: corev1.ConditionFalse, Reason: "NotProvisioned", - Message: "NotImplemented", + Message: msg, }, }, } @@ -181,7 +197,13 @@ func channelType() metav1.TypeMeta { } } -func 
getNewClusterProvisioner(name string) *eventingv1alpha1.ClusterProvisioner { +func getNewClusterProvisioner(name string, isReady bool) *eventingv1alpha1.ClusterProvisioner { + var condStatus corev1.ConditionStatus + if isReady { + condStatus = corev1.ConditionTrue + } else { + condStatus = corev1.ConditionFalse + } clusterProvisioner := &eventingv1alpha1.ClusterProvisioner{ TypeMeta: metav1.TypeMeta{ APIVersion: eventingv1alpha1.SchemeGroupVersion.String(), @@ -198,11 +220,11 @@ func getNewClusterProvisioner(name string) *eventingv1alpha1.ClusterProvisioner Conditions: []duckv1alpha1.Condition{ { Type: eventingv1alpha1.ClusterProvisionerConditionProvisionerReady, - Status: corev1.ConditionTrue, + Status: condStatus, }, { Type: eventingv1alpha1.ClusterProvisionerConditionReady, - Status: corev1.ConditionTrue, + Status: condStatus, }, }, }, @@ -224,9 +246,8 @@ func getControllerConfigMap() *corev1.ConfigMap { return &corev1.ConfigMap{ ObjectMeta: om(system.Namespace, controller.ControllerConfigMapName), Data: map[string]string{ - controller.ProvisionerNameConfigMapKey: clusterProvisionerName, - controller.ProvisionerNamespaceConfigMapKey: "", - controller.BrokerConfigMapKey: "test-broker", + controller.ClusterProvisionerNameConfigMapKey: clusterProvisionerName, + controller.BrokerConfigMapKey: "test-broker", }, } } diff --git a/pkg/provisioners/kafka/controller/reconcile.go b/pkg/provisioners/kafka/controller/reconcile.go index fbd01f0eaa6..2446175fe61 100644 --- a/pkg/provisioners/kafka/controller/reconcile.go +++ b/pkg/provisioners/kafka/controller/reconcile.go @@ -92,7 +92,7 @@ func (r *reconciler) reconcile(provisioner *v1alpha1.ClusterProvisioner) error { } // Skip channel provisioners that we don't manage - if provisioner.Name != config.Name || provisioner.Namespace != config.Namespace { + if provisioner.Name != config.Name { return nil } diff --git a/pkg/provisioners/kafka/controller/reconcile_test.go b/pkg/provisioners/kafka/controller/reconcile_test.go index 
2e3f0e341a8..ae2a6bdf96b 100644 --- a/pkg/provisioners/kafka/controller/reconcile_test.go +++ b/pkg/provisioners/kafka/controller/reconcile_test.go @@ -166,9 +166,8 @@ func getControllerConfigMap() *corev1.ConfigMap { return &corev1.ConfigMap{ ObjectMeta: om(system.Namespace, ControllerConfigMapName), Data: map[string]string{ - ProvisionerNameConfigMapKey: clusterProvisionerName, - ProvisionerNamespaceConfigMapKey: "", - BrokerConfigMapKey: "test-broker", + ClusterProvisionerNameConfigMapKey: clusterProvisionerName, + BrokerConfigMapKey: "test-broker", }, } } diff --git a/pkg/provisioners/kafka/controller/util.go b/pkg/provisioners/kafka/controller/util.go index 71e6ff7a10d..6d3ff629cac 100644 --- a/pkg/provisioners/kafka/controller/util.go +++ b/pkg/provisioners/kafka/controller/util.go @@ -16,15 +16,13 @@ const ( // namespace that holds the configuration for this controller. ControllerConfigMapName = "kafka-provisioner-config" - ProvisionerNameConfigMapKey = "provisioner-name" - ProvisionerNamespaceConfigMapKey = "provisioner-namespace" - BrokerConfigMapKey = "brokers" + ClusterProvisionerNameConfigMapKey = "cluster-provisioner-name" + BrokerConfigMapKey = "brokers" ) type KafkaProvisionerConfig struct { - Name string - Namespace string - Brokers []string + Name string + Brokers []string } // GetProvisionerConfig returns the details of the associated Provisioner/ClusterProvisioner object @@ -41,16 +39,10 @@ func GetProvisionerConfig(client runtimeClient.Client) (*KafkaProvisionerConfig, config := &KafkaProvisionerConfig{} - if value, ok := configMap.Data[ProvisionerNameConfigMapKey]; ok { + if value, ok := configMap.Data[ClusterProvisionerNameConfigMapKey]; ok { config.Name = value } else { - return nil, fmt.Errorf("missing key %s in config map %s", ProvisionerNameConfigMapKey, ControllerConfigMapName) - } - - if value, ok := configMap.Data[ProvisionerNamespaceConfigMapKey]; ok { - config.Namespace = value - } else { - return nil, fmt.Errorf("missing key %s in 
config map %s", ProvisionerNamespaceConfigMapKey, ControllerConfigMapName) + return nil, fmt.Errorf("missing key %s in config map %s", ClusterProvisionerNameConfigMapKey, ControllerConfigMapName) } if value, ok := configMap.Data[BrokerConfigMapKey]; ok { diff --git a/pkg/provisioners/kafka/main.go b/pkg/provisioners/kafka/main.go index 6fdcfbb063e..4ece9bdc89a 100644 --- a/pkg/provisioners/kafka/main.go +++ b/pkg/provisioners/kafka/main.go @@ -26,14 +26,16 @@ type SchemeFunc func(*runtime.Scheme) error // ProvideFunc adds a controller to a Manager. type ProvideFunc func(mgr manager.Manager, log logr.Logger) (controller.Controller, error) -// controllerRuntimeStart runs controllers written for controller-runtime. It's -// intended to be called from main(). Any controllers migrated to use -// controller-runtime should move their initialization to this function. -func controllerRuntimeStart() error { +func main() { + flag.Parse() + logf.SetLogger(logf.ZapLogger(false)) + entryLog := log.WithName("entrypoint") + // Setup a Manager mgr, err := manager.New(config.GetConfigOrDie(), manager.Options{}) if err != nil { - return err + entryLog.Error(err, "unable to run controller manager") + os.Exit(1) } // Add custom types to this array to get them into the manager's scheme. 
@@ -53,19 +55,10 @@ func controllerRuntimeStart() error { for _, provider := range providers { if _, err := provider(mgr, log); err != nil { - return err + entryLog.Error(err, "unable to run controller manager") + os.Exit(1) } } - return mgr.Start(signals.SetupSignalHandler()) -} - -func main() { - flag.Parse() - logf.SetLogger(logf.ZapLogger(false)) - entryLog := log.WithName("entrypoint") - if err := controllerRuntimeStart(); err != nil { - entryLog.Error(err, "unable to run controller manager") - os.Exit(1) - } + mgr.Start(signals.SetupSignalHandler()) } From c25ce1b7829dd78a112f77a57a9856c59e2401e4 Mon Sep 17 00:00:00 2001 From: Sabari Kumar Murugesan Date: Mon, 8 Oct 2018 15:42:04 -0700 Subject: [PATCH 03/20] Remove fetching configmap in controller --- config/provisioners/kafka/clusterrole.yaml | 5 +- config/provisioners/kafka/controller.yaml | 9 ++- .../kafka/controller/channel/provider.go | 5 +- .../kafka/controller/channel/reconcile.go | 9 +-- .../controller/channel/reconcile_test.go | 18 ++---- pkg/provisioners/kafka/controller/provider.go | 4 +- .../kafka/controller/reconcile.go | 13 ++--- .../kafka/controller/reconcile_test.go | 16 ++--- pkg/provisioners/kafka/controller/types.go | 6 ++ pkg/provisioners/kafka/controller/util.go | 58 ------------------- pkg/provisioners/kafka/main.go | 50 +++++++++++++++- 11 files changed, 86 insertions(+), 107 deletions(-) create mode 100644 pkg/provisioners/kafka/controller/types.go delete mode 100644 pkg/provisioners/kafka/controller/util.go diff --git a/config/provisioners/kafka/clusterrole.yaml b/config/provisioners/kafka/clusterrole.yaml index 3298a12f984..6aa22f158ed 100644 --- a/config/provisioners/kafka/clusterrole.yaml +++ b/config/provisioners/kafka/clusterrole.yaml @@ -25,7 +25,4 @@ rules: verbs: ["get", "watch", "list", "update", "patch"] - apiGroups: [""] resources: ["events"] - verbs: ["create", "patch"] -- apiGroups: [""] - resources: ["configmaps"] - verbs: ["get", "watch", "list"] \ No newline at end of 
file + verbs: ["create", "patch"] \ No newline at end of file diff --git a/config/provisioners/kafka/controller.yaml b/config/provisioners/kafka/controller.yaml index 2bd28585745..e8fa2b9b522 100644 --- a/config/provisioners/kafka/controller.yaml +++ b/config/provisioners/kafka/controller.yaml @@ -31,4 +31,11 @@ spec: args: [ "-logtostderr", "-stderrthreshold", "INFO", - ] \ No newline at end of file + ] + volumeMounts: + - name: kafka-provisioner-config + mountPath: /etc/config-provisioner + volumes: + - name: kafka-provisioner-config + configMap: + name: kafka-provisioner-config \ No newline at end of file diff --git a/pkg/provisioners/kafka/controller/channel/provider.go b/pkg/provisioners/kafka/controller/channel/provider.go index 61b51ff3e45..8fcab75de36 100644 --- a/pkg/provisioners/kafka/controller/channel/provider.go +++ b/pkg/provisioners/kafka/controller/channel/provider.go @@ -28,6 +28,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/source" "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" + common "github.com/knative/eventing/pkg/provisioners/kafka/controller" ) const ( @@ -41,18 +42,20 @@ type reconciler struct { restConfig *rest.Config recorder record.EventRecorder log logr.Logger + config *common.KafkaProvisionerConfig } // Verify the struct implements reconcile.Reconciler var _ reconcile.Reconciler = &reconciler{} // ProvideController returns a Channel controller. -func ProvideController(mgr manager.Manager, log logr.Logger) (controller.Controller, error) { +func ProvideController(mgr manager.Manager, config *common.KafkaProvisionerConfig, log logr.Logger) (controller.Controller, error) { // Setup a new controller to Reconcile Channel. 
c, err := controller.New(controllerAgentName, mgr, controller.Options{ Reconciler: &reconciler{ recorder: mgr.GetRecorder(controllerAgentName), log: log, + config: config, }, }) if err != nil { diff --git a/pkg/provisioners/kafka/controller/channel/reconcile.go b/pkg/provisioners/kafka/controller/channel/reconcile.go index ab631cac54a..19925bb4003 100644 --- a/pkg/provisioners/kafka/controller/channel/reconcile.go +++ b/pkg/provisioners/kafka/controller/channel/reconcile.go @@ -27,7 +27,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" - "github.com/knative/eventing/pkg/provisioners/kafka/controller" ) // Reconcile compares the actual state with the desired, and attempts to @@ -111,15 +110,11 @@ func (r *reconciler) reconcile(channel *v1alpha1.Channel) error { } func (r *reconciler) getClusterProvisioner() (*v1alpha1.ClusterProvisioner, error) { - config, err := controller.GetProvisionerConfig(r.client) - if err != nil { - return nil, err - } clusterProvisioner := &v1alpha1.ClusterProvisioner{} objKey := client.ObjectKey{ - Name: config.Name, + Name: r.config.Name, } - if err = r.client.Get(context.TODO(), objKey, clusterProvisioner); err != nil { + if err := r.client.Get(context.TODO(), objKey, clusterProvisioner); err != nil { return nil, err } return clusterProvisioner, nil diff --git a/pkg/provisioners/kafka/controller/channel/reconcile_test.go b/pkg/provisioners/kafka/controller/channel/reconcile_test.go index f1e95c288ab..04e1256e8b1 100644 --- a/pkg/provisioners/kafka/controller/channel/reconcile_test.go +++ b/pkg/provisioners/kafka/controller/channel/reconcile_test.go @@ -34,7 +34,6 @@ import ( eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" controllertesting "github.com/knative/eventing/pkg/controller/testing" "github.com/knative/eventing/pkg/provisioners/kafka/controller" - "github.com/knative/eventing/pkg/system" ) var ( @@ -58,7 +57,6 @@ var testCases = 
[]controllertesting.TestCase{ InitialState: []runtime.Object{ getNewClusterProvisioner(clusterProvisionerName, true), getNewChannel(channelName, clusterProvisionerName), - getControllerConfigMap(), }, ReconcileKey: fmt.Sprintf("%s/%s", testNS, channelName), WantResult: reconcile.Result{}, @@ -72,7 +70,6 @@ var testCases = []controllertesting.TestCase{ InitialState: []runtime.Object{ getNewClusterProvisioner(clusterProvisionerName, false), getNewChannel(channelName, clusterProvisionerName), - getControllerConfigMap(), }, ReconcileKey: fmt.Sprintf("%s/%s", testNS, channelName), WantResult: reconcile.Result{}, @@ -87,7 +84,6 @@ var testCases = []controllertesting.TestCase{ Name: "new channel with missing provisioner: error", InitialState: []runtime.Object{ getNewChannel(channelName, clusterProvisionerName), - getControllerConfigMap(), }, ReconcileKey: fmt.Sprintf("%s/%s", testNS, channelName), WantResult: reconcile.Result{}, @@ -100,7 +96,6 @@ var testCases = []controllertesting.TestCase{ getNewChannel(channelName, "not-our-provisioner"), getNewClusterProvisioner("not-our-provisioner", true), getNewClusterProvisioner(clusterProvisionerName, true), - getControllerConfigMap(), }, ReconcileKey: fmt.Sprintf("%s/%s", testNS, channelName), WantResult: reconcile.Result{}, @@ -113,7 +108,6 @@ var testCases = []controllertesting.TestCase{ Name: "new channel with missing provisioner reference: skips channel", InitialState: []runtime.Object{ getNewChannelNoProvisioner(channelName), - getControllerConfigMap(), }, ReconcileKey: fmt.Sprintf("%s/%s", testNS, channelName), WantResult: reconcile.Result{}, @@ -134,6 +128,7 @@ func TestAllCases(t *testing.T) { restConfig: &rest.Config{}, recorder: recorder, log: log, + config: getControllerConfig(), } t.Logf("Running test %s", tc.Name) t.Run(tc.Name, tc.Runner(t, r, c)) @@ -242,12 +237,9 @@ func om(namespace, name string) metav1.ObjectMeta { } } -func getControllerConfigMap() *corev1.ConfigMap { - return &corev1.ConfigMap{ - ObjectMeta: 
om(system.Namespace, controller.ControllerConfigMapName), - Data: map[string]string{ - controller.ClusterProvisionerNameConfigMapKey: clusterProvisionerName, - controller.BrokerConfigMapKey: "test-broker", - }, +func getControllerConfig() *controller.KafkaProvisionerConfig { + return &controller.KafkaProvisionerConfig{ + Name: clusterProvisionerName, + Brokers: []string{"test-broker"}, } } diff --git a/pkg/provisioners/kafka/controller/provider.go b/pkg/provisioners/kafka/controller/provider.go index fb73c4a9fc4..77e387bb0da 100644 --- a/pkg/provisioners/kafka/controller/provider.go +++ b/pkg/provisioners/kafka/controller/provider.go @@ -41,18 +41,20 @@ type reconciler struct { restConfig *rest.Config recorder record.EventRecorder log logr.Logger + config *KafkaProvisionerConfig } // Verify the struct implements reconcile.Reconciler var _ reconcile.Reconciler = &reconciler{} // ProvideController returns a Provisioner controller. -func ProvideController(mgr manager.Manager, log logr.Logger) (controller.Controller, error) { +func ProvideController(mgr manager.Manager, config *KafkaProvisionerConfig, log logr.Logger) (controller.Controller, error) { // Setup a new controller to Reconcile Provisioners. 
c, err := controller.New(controllerAgentName, mgr, controller.Options{ Reconciler: &reconciler{ recorder: mgr.GetRecorder(controllerAgentName), log: log, + config: config, }, }) if err != nil { diff --git a/pkg/provisioners/kafka/controller/reconcile.go b/pkg/provisioners/kafka/controller/reconcile.go index 2446175fe61..fdfe0747314 100644 --- a/pkg/provisioners/kafka/controller/reconcile.go +++ b/pkg/provisioners/kafka/controller/reconcile.go @@ -81,18 +81,13 @@ func (r *reconciler) reconcile(provisioner *v1alpha1.ClusterProvisioner) error { return nil } - // Only reconcile channel provisioners - if provisioner.Spec.Reconciles.Group != eventing.GroupName || provisioner.Spec.Reconciles.Kind != "Channel" { + // Skip channel provisioners that we don't manage + if provisioner.Name != r.config.Name { return nil } - config, err := GetProvisionerConfig(r.client) - if err != nil { - return err - } - - // Skip channel provisioners that we don't manage - if provisioner.Name != config.Name { + // Only reconcile channel provisioners + if provisioner.Spec.Reconciles.Group != eventing.GroupName || provisioner.Spec.Reconciles.Kind != "Channel" { return nil } diff --git a/pkg/provisioners/kafka/controller/reconcile_test.go b/pkg/provisioners/kafka/controller/reconcile_test.go index ae2a6bdf96b..07a33f0b75e 100644 --- a/pkg/provisioners/kafka/controller/reconcile_test.go +++ b/pkg/provisioners/kafka/controller/reconcile_test.go @@ -33,7 +33,6 @@ import ( "github.com/knative/eventing/pkg/apis/eventing" eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" controllertesting "github.com/knative/eventing/pkg/controller/testing" - "github.com/knative/eventing/pkg/system" ) var ( @@ -65,7 +64,6 @@ var testCases = []controllertesting.TestCase{ Name: "new channel clusterprovisioner: adds status", InitialState: []runtime.Object{ GetNewChannelClusterProvisioner(clusterProvisionerName), - getControllerConfigMap(), }, ReconcileKey: fmt.Sprintf("%s/%s", testNS, 
clusterProvisionerName), WantResult: reconcile.Result{}, @@ -78,7 +76,6 @@ var testCases = []controllertesting.TestCase{ Name: "reconciles only channel kind", InitialState: []runtime.Object{ getNewClusterProvisioner(clusterProvisionerName, "Source"), - getControllerConfigMap(), }, ReconcileKey: fmt.Sprintf("%s/%s", testNS, clusterProvisionerName), WantResult: reconcile.Result{}, @@ -90,7 +87,6 @@ var testCases = []controllertesting.TestCase{ Name: "reconciles only associated provisioner", InitialState: []runtime.Object{ GetNewChannelClusterProvisioner("not-default-provisioner"), - getControllerConfigMap(), }, ReconcileKey: fmt.Sprintf("%s/%s", testNS, "not-default-provisioner"), WantResult: reconcile.Result{}, @@ -110,6 +106,7 @@ func TestAllCases(t *testing.T) { restConfig: &rest.Config{}, recorder: recorder, log: log, + config: getControllerConfig(), } t.Logf("Running test %s", tc.Name) t.Run(tc.Name, tc.Runner(t, r, c)) @@ -162,12 +159,9 @@ func om(namespace, name string) metav1.ObjectMeta { } } -func getControllerConfigMap() *corev1.ConfigMap { - return &corev1.ConfigMap{ - ObjectMeta: om(system.Namespace, ControllerConfigMapName), - Data: map[string]string{ - ClusterProvisionerNameConfigMapKey: clusterProvisionerName, - BrokerConfigMapKey: "test-broker", - }, +func getControllerConfig() *KafkaProvisionerConfig { + return &KafkaProvisionerConfig{ + Name: clusterProvisionerName, + Brokers: []string{"test-broker"}, } } diff --git a/pkg/provisioners/kafka/controller/types.go b/pkg/provisioners/kafka/controller/types.go new file mode 100644 index 00000000000..620063ff2bd --- /dev/null +++ b/pkg/provisioners/kafka/controller/types.go @@ -0,0 +1,6 @@ +package controller + +type KafkaProvisionerConfig struct { + Name string + Brokers []string +} diff --git a/pkg/provisioners/kafka/controller/util.go b/pkg/provisioners/kafka/controller/util.go deleted file mode 100644 index 6d3ff629cac..00000000000 --- a/pkg/provisioners/kafka/controller/util.go +++ /dev/null @@ -1,58 
+0,0 @@ -package controller - -import ( - "context" - "fmt" - "strings" - - corev1 "k8s.io/api/core/v1" - runtimeClient "sigs.k8s.io/controller-runtime/pkg/client" - - "github.com/knative/eventing/pkg/system" -) - -const ( - // controllerConfigMapName is the name of the configmap in the eventing - // namespace that holds the configuration for this controller. - ControllerConfigMapName = "kafka-provisioner-config" - - ClusterProvisionerNameConfigMapKey = "cluster-provisioner-name" - BrokerConfigMapKey = "brokers" -) - -type KafkaProvisionerConfig struct { - Name string - Brokers []string -} - -// GetProvisionerConfig returns the details of the associated Provisioner/ClusterProvisioner object -func GetProvisionerConfig(client runtimeClient.Client) (*KafkaProvisionerConfig, error) { - configMapKey := runtimeClient.ObjectKey{ - Namespace: system.Namespace, - Name: ControllerConfigMapName, - } - - configMap := &corev1.ConfigMap{} - if err := client.Get(context.TODO(), configMapKey, configMap); err != nil { - return nil, err - } - - config := &KafkaProvisionerConfig{} - - if value, ok := configMap.Data[ClusterProvisionerNameConfigMapKey]; ok { - config.Name = value - } else { - return nil, fmt.Errorf("missing key %s in config map %s", ClusterProvisionerNameConfigMapKey, ControllerConfigMapName) - } - - if value, ok := configMap.Data[BrokerConfigMapKey]; ok { - brokers := strings.Split(value, ",") - if len(brokers) == 0 { - return nil, fmt.Errorf("missing kafka brokers in configmap %s", ControllerConfigMapName) - } - config.Brokers = brokers - return config, nil - } - - return nil, fmt.Errorf("missing key %s in config map %s", BrokerConfigMapKey, ControllerConfigMapName) -} diff --git a/pkg/provisioners/kafka/main.go b/pkg/provisioners/kafka/main.go index 4ece9bdc89a..61609d069a2 100644 --- a/pkg/provisioners/kafka/main.go +++ b/pkg/provisioners/kafka/main.go @@ -2,7 +2,9 @@ package main import ( "flag" + "fmt" "os" + "strings" "github.com/go-logr/logr" 
"k8s.io/apimachinery/pkg/runtime" @@ -16,6 +18,12 @@ import ( "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" provisionerController "github.com/knative/eventing/pkg/provisioners/kafka/controller" "github.com/knative/eventing/pkg/provisioners/kafka/controller/channel" + "github.com/knative/pkg/configmap" +) + +const ( + ClusterProvisionerNameConfigMapKey = "cluster-provisioner-name" + BrokerConfigMapKey = "brokers" ) var log = logf.Log.WithName("kafka-provisioner") @@ -24,7 +32,7 @@ var log = logf.Log.WithName("kafka-provisioner") type SchemeFunc func(*runtime.Scheme) error // ProvideFunc adds a controller to a Manager. -type ProvideFunc func(mgr manager.Manager, log logr.Logger) (controller.Controller, error) +type ProvideFunc func(mgr manager.Manager, config *provisionerController.KafkaProvisionerConfig, log logr.Logger) (controller.Controller, error) func main() { flag.Parse() @@ -53,8 +61,15 @@ func main() { channel.ProvideController, } + provisionerConfig, err := getProvisionerConfig() + + if err != nil { + entryLog.Error(err, "unable to run controller manager") + os.Exit(1) + } + for _, provider := range providers { - if _, err := provider(mgr, log); err != nil { + if _, err := provider(mgr, provisionerConfig, log); err != nil { entryLog.Error(err, "unable to run controller manager") os.Exit(1) } @@ -62,3 +77,34 @@ func main() { mgr.Start(signals.SetupSignalHandler()) } + +// getProvisionerConfig returns the details of the associated Provisioner/ClusterProvisioner object +func getProvisionerConfig() (*provisionerController.KafkaProvisionerConfig, error) { + configMap, err := configmap.Load("/etc/config-provisioner") + if err != nil { + return nil, fmt.Errorf("error loading provisioner configuration: %s", err) + } + + if len(configMap) == 0 { + return nil, fmt.Errorf("missing provisioner configuration") + } + + config := &provisionerController.KafkaProvisionerConfig{} + + if value, ok := configMap[ClusterProvisionerNameConfigMapKey]; ok { + config.Name 
= value + } else { + return nil, fmt.Errorf("missing key %s in provisioner configuration", ClusterProvisionerNameConfigMapKey) + } + + if value, ok := configMap[BrokerConfigMapKey]; ok { + brokers := strings.Split(value, ",") + if len(brokers) == 0 { + return nil, fmt.Errorf("missing kafka brokers in provisioner configuration") + } + config.Brokers = brokers + return config, nil + } + + return nil, fmt.Errorf("missing key %s in provisioner configuration", BrokerConfigMapKey) +} From 2ec153bb12eb2f4c28f7c51292770002002253ce Mon Sep 17 00:00:00 2001 From: Sabari Kumar Murugesan Date: Tue, 9 Oct 2018 18:25:04 -0700 Subject: [PATCH 04/20] Add more tests, improv coverage --- .../eventing/v1alpha1/channel_types_test.go | 87 +++++++++++++++++++ .../cluster_provisioner_types_test.go | 21 ++++- .../controller/channel/reconcile_test.go | 36 ++++++++ .../kafka/controller/reconcile_test.go | 51 +++++++++++ 4 files changed, 194 insertions(+), 1 deletion(-) diff --git a/pkg/apis/eventing/v1alpha1/channel_types_test.go b/pkg/apis/eventing/v1alpha1/channel_types_test.go index e8a2b2d188a..c6824635fbc 100644 --- a/pkg/apis/eventing/v1alpha1/channel_types_test.go +++ b/pkg/apis/eventing/v1alpha1/channel_types_test.go @@ -20,6 +20,7 @@ import ( "testing" "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1" corev1 "k8s.io/api/core/v1" ) @@ -80,3 +81,89 @@ func TestChannelGetCondition(t *testing.T) { }) } } + +func TestChannelInitializeConditions(t *testing.T) { + tests := []struct { + name string + cs *ChannelStatus + want *ChannelStatus + }{{ + name: "empty", + cs: &ChannelStatus{}, + want: &ChannelStatus{ + Conditions: []duckv1alpha1.Condition{{ + Type: ChannelConditionProvisioned, + Status: corev1.ConditionUnknown, + }, { + Type: ChannelConditionReady, + Status: corev1.ConditionUnknown, + }}, + }, + }, { + name: "one false", + cs: &ChannelStatus{ + Conditions: []duckv1alpha1.Condition{{ + Type: 
ChannelConditionProvisioned, + Status: corev1.ConditionFalse, + }}, + }, + want: &ChannelStatus{ + Conditions: []duckv1alpha1.Condition{{ + Type: ChannelConditionProvisioned, + Status: corev1.ConditionFalse, + }, { + Type: ChannelConditionReady, + Status: corev1.ConditionUnknown, + }}, + }, + }, { + name: "one true", + cs: &ChannelStatus{ + Conditions: []duckv1alpha1.Condition{{ + Type: ChannelConditionProvisioned, + Status: corev1.ConditionTrue, + }}, + }, + want: &ChannelStatus{ + Conditions: []duckv1alpha1.Condition{{ + Type: ChannelConditionProvisioned, + Status: corev1.ConditionTrue, + }, { + Type: ChannelConditionReady, + Status: corev1.ConditionUnknown, + }}}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + test.cs.InitializeConditions() + ignore := cmpopts.IgnoreFields(duckv1alpha1.Condition{}, "LastTransitionTime") + if diff := cmp.Diff(test.want, test.cs, ignore); diff != "" { + t.Errorf("unexpected conditions (-want, +got) = %v", diff) + } + }) + } +} + +func TestChannelStatus_MarkAsNotProvisioned(t *testing.T) { + cs := &ChannelStatus{} + cs.InitializeConditions() + want := &ChannelStatus{ + Conditions: []duckv1alpha1.Condition{{ + Type: ChannelConditionProvisioned, + Status: corev1.ConditionFalse, + Reason: "Not Provisioned", + Message: "testing", + }, { + Type: ChannelConditionReady, + Status: corev1.ConditionFalse, + Reason: "Not Provisioned", + Message: "testing", + }}} + ignore := cmpopts.IgnoreFields(duckv1alpha1.Condition{}, "LastTransitionTime") + cs.MarkAsNotProvisioned("Not Provisioned", "testing") + if diff := cmp.Diff(want, cs, ignore); diff != "" { + t.Errorf("unexpected conditions (-want, +got) = %v", diff) + } +} diff --git a/pkg/apis/eventing/v1alpha1/cluster_provisioner_types_test.go b/pkg/apis/eventing/v1alpha1/cluster_provisioner_types_test.go index 8a9ab747aa0..0eef70ce458 100644 --- a/pkg/apis/eventing/v1alpha1/cluster_provisioner_types_test.go +++ 
b/pkg/apis/eventing/v1alpha1/cluster_provisioner_types_test.go @@ -68,7 +68,26 @@ func TestClusterProvisionerStatusIsReady(t *testing.T) { }}, }, want: false, - }} + }, { + name: "mark provisioner ready", + ps: func() *ClusterProvisionerStatus { + ps := &ClusterProvisionerStatus{} + ps.InitializeConditions() + ps.MarkProvisionerReady() + return ps + }(), + want: true, + }, + { + name: "mark provisioner not ready", + ps: func() *ClusterProvisionerStatus { + ps := &ClusterProvisionerStatus{} + ps.InitializeConditions() + ps.MarkProvisionerNotReady("Not Ready", "testing") + return ps + }(), + want: false, + }} for _, test := range tests { t.Run(test.name, func(t *testing.T) { diff --git a/pkg/provisioners/kafka/controller/channel/reconcile_test.go b/pkg/provisioners/kafka/controller/channel/reconcile_test.go index 04e1256e8b1..8b0198e2fc9 100644 --- a/pkg/provisioners/kafka/controller/channel/reconcile_test.go +++ b/pkg/provisioners/kafka/controller/channel/reconcile_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package channel import ( + "context" "fmt" "testing" @@ -27,6 +28,7 @@ import ( "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" @@ -51,6 +53,18 @@ func init() { eventingv1alpha1.AddToScheme(scheme.Scheme) } +var mockFetchError = controllertesting.Mocks{ + MockGets: []controllertesting.MockGet{ + func(innerClient client.Client, ctx context.Context, key client.ObjectKey, obj runtime.Object) (controllertesting.MockHandled, error) { + if _, ok := obj.(*eventingv1alpha1.Channel); ok { + err := fmt.Errorf("error fetching") + return controllertesting.Handled, err + } + return controllertesting.Unhandled, nil + }, + }, +} + var testCases = []controllertesting.TestCase{ { Name: "new channel with valid provisioner: adds not provisioned status", @@ -116,6 +130,28 @@ var testCases = []controllertesting.TestCase{ }, IgnoreTimes: true, }, + { + Name: "channel not found", + InitialState: []runtime.Object{}, + ReconcileKey: fmt.Sprintf("%s/%s", testNS, channelName), + WantResult: reconcile.Result{}, + WantPresent: []runtime.Object{}, + IgnoreTimes: true, + }, + { + Name: "error fetching channel", + InitialState: []runtime.Object{ + getNewClusterProvisioner(clusterProvisionerName, true), + getNewChannel(channelName, clusterProvisionerName), + }, + Mocks: mockFetchError, + ReconcileKey: fmt.Sprintf("%s/%s", testNS, channelName), + WantErrMsg: "error fetching", + WantPresent: []runtime.Object{ + getNewClusterProvisioner(clusterProvisionerName, true), + getNewChannel(channelName, clusterProvisionerName), + }, + }, } func TestAllCases(t *testing.T) { diff --git a/pkg/provisioners/kafka/controller/reconcile_test.go b/pkg/provisioners/kafka/controller/reconcile_test.go index 07a33f0b75e..bed366bf73b 100644 --- a/pkg/provisioners/kafka/controller/reconcile_test.go +++ 
b/pkg/provisioners/kafka/controller/reconcile_test.go @@ -17,6 +17,7 @@ limitations under the License. package controller import ( + "context" "fmt" "testing" @@ -27,6 +28,7 @@ import ( "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" @@ -59,6 +61,18 @@ var ClusterProvisionerConditionProvisionerReady = duckv1alpha1.Condition{ Status: corev1.ConditionTrue, } +var mockFetchError = controllertesting.Mocks{ + MockGets: []controllertesting.MockGet{ + func(innerClient client.Client, ctx context.Context, key client.ObjectKey, obj runtime.Object) (controllertesting.MockHandled, error) { + if _, ok := obj.(*eventingv1alpha1.ClusterProvisioner); ok { + err := fmt.Errorf("error fetching") + return controllertesting.Handled, err + } + return controllertesting.Unhandled, nil + }, + }, +} + var testCases = []controllertesting.TestCase{ { Name: "new channel clusterprovisioner: adds status", @@ -94,6 +108,36 @@ var testCases = []controllertesting.TestCase{ GetNewChannelClusterProvisioner("not-default-provisioner"), }, }, + { + Name: "clusterprovisioner not found", + InitialState: []runtime.Object{}, + ReconcileKey: fmt.Sprintf("%s/%s", testNS, clusterProvisionerName), + WantResult: reconcile.Result{}, + WantPresent: []runtime.Object{}, + }, + { + Name: "error fetching clusterprovisioner", + InitialState: []runtime.Object{ + GetNewChannelClusterProvisioner(clusterProvisionerName), + }, + Mocks: mockFetchError, + ReconcileKey: fmt.Sprintf("%s/%s", testNS, clusterProvisionerName), + WantErrMsg: "error fetching", + WantPresent: []runtime.Object{ + GetNewChannelClusterProvisioner(clusterProvisionerName), + }, + }, + { + Name: "deleted clusterprovisioner", + InitialState: []runtime.Object{ + GetNewChannelClusterProvisionerDeleted(clusterProvisionerName), + }, + ReconcileKey: fmt.Sprintf("%s/%s", 
testNS, clusterProvisionerName), + WantResult: reconcile.Result{}, + WantPresent: []runtime.Object{ + GetNewChannelClusterProvisionerDeleted(clusterProvisionerName), + }, + }, } func TestAllCases(t *testing.T) { @@ -144,6 +188,13 @@ func GetNewChannelClusterProvisionerReady(name string) *eventingv1alpha1.Cluster return c } +func GetNewChannelClusterProvisionerDeleted(name string) *eventingv1alpha1.ClusterProvisioner { + c := GetNewChannelClusterProvisioner(name) + deletedTime := metav1.Now().Rfc3339Copy() + c.DeletionTimestamp = &deletedTime + return c +} + func ClusterProvisonerType() metav1.TypeMeta { return metav1.TypeMeta{ APIVersion: eventingv1alpha1.SchemeGroupVersion.String(), From 4c8232db14d1764714a5288cf32dcf50b40d78ca Mon Sep 17 00:00:00 2001 From: Sabari Kumar Murugesan Date: Wed, 10 Oct 2018 16:24:46 -0700 Subject: [PATCH 05/20] Add ChannelStatus.IsReady --- pkg/apis/eventing/v1alpha1/channel_types.go | 12 +++++++- .../eventing/v1alpha1/channel_types_test.go | 30 +++++++++++++++++++ 2 files changed, 41 insertions(+), 1 deletion(-) diff --git a/pkg/apis/eventing/v1alpha1/channel_types.go b/pkg/apis/eventing/v1alpha1/channel_types.go index 0dc8e2416a1..835e358f93f 100644 --- a/pkg/apis/eventing/v1alpha1/channel_types.go +++ b/pkg/apis/eventing/v1alpha1/channel_types.go @@ -117,12 +117,22 @@ func (cs *ChannelStatus) GetCondition(t duckv1alpha1.ConditionType) *duckv1alpha return chanCondSet.Manage(cs).GetCondition(t) } +// IsReady returns true if the resource is ready overall. +func (cs *ChannelStatus) IsReady() bool { + return chanCondSet.Manage(cs).IsHappy() +} + // InitializeConditions sets relevant unset conditions to Unknown state. func (cs *ChannelStatus) InitializeConditions() { chanCondSet.Manage(cs).InitializeConditions() } -// InitializeConditions sets relevant unset conditions to Unknown state. +// MarkAsProvisioned sets ChannelConditionProvisioned condition to True state. 
+func (cs *ChannelStatus) MarkAsProvisioned() { + chanCondSet.Manage(cs).MarkTrue(ChannelConditionProvisioned) +} + +// MarkAsNotProvisioned sets ChannelConditionProvisioned condition to False state. func (cs *ChannelStatus) MarkAsNotProvisioned(reason, messageFormat string, messageA ...interface{}) { chanCondSet.Manage(cs).MarkFalse(ChannelConditionProvisioned, reason, messageFormat, messageA...) } diff --git a/pkg/apis/eventing/v1alpha1/channel_types_test.go b/pkg/apis/eventing/v1alpha1/channel_types_test.go index c6824635fbc..55bde06c8f9 100644 --- a/pkg/apis/eventing/v1alpha1/channel_types_test.go +++ b/pkg/apis/eventing/v1alpha1/channel_types_test.go @@ -146,6 +146,36 @@ func TestChannelInitializeConditions(t *testing.T) { } } +func TestChannelIsReady(t *testing.T) { + tests := []struct { + name string + markProvisioned bool + wantReady bool + }{{ + name: "all happy", + markProvisioned: true, + wantReady: true, + }, { + name: "one sad", + markProvisioned: false, + wantReady: false, + }} + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + cs := &ChannelStatus{} + if test.markProvisioned { + cs.MarkAsProvisioned() + } else { + cs.MarkAsNotProvisioned("NotProvisioned", "testing") + } + got := cs.IsReady() + if test.wantReady != got { + t.Errorf("unexpected readiness: want %v, got %v", test.wantReady, got) + } + }) + } +} + func TestChannelStatus_MarkAsNotProvisioned(t *testing.T) { cs := &ChannelStatus{} cs.InitializeConditions() From 00ab78ef7ec7e546226008e0b30b589aa512c4f5 Mon Sep 17 00:00:00 2001 From: Sabari Kumar Murugesan Date: Mon, 15 Oct 2018 13:40:43 -0700 Subject: [PATCH 06/20] Address PR comments --- config/provisioners/kafka/README.md | 43 +++++++++ config/provisioners/kafka/broker/README.md | 13 +++ .../kafka/broker/kafka-broker.yaml | 87 +++++++++++++++++++ .../kafka/clusterprovisioner.yaml | 22 ----- config/provisioners/kafka/clusterrole.yaml | 28 ------ .../kafka/clusterrolebinding.yaml | 26 ------ 
.../kafka/kafka-provisioner-config.yaml | 24 ----- ...controller.yaml => kafka-provisioner.yaml} | 59 +++++++++++-- .../provisioners/kafka/service-account.yaml | 19 ---- .../v1alpha1/cluster_provisioner_types.go | 15 ++-- .../cluster_provisioner_types_test.go | 45 +++------- .../kafka/controller/channel/provider.go | 15 +--- .../kafka/controller/channel/reconcile.go | 53 ++++++----- .../controller/channel/reconcile_test.go | 14 +-- pkg/provisioners/kafka/controller/provider.go | 15 +--- .../kafka/controller/reconcile.go | 22 ++--- .../kafka/controller/reconcile_test.go | 16 +--- 17 files changed, 269 insertions(+), 247 deletions(-) create mode 100644 config/provisioners/kafka/README.md create mode 100644 config/provisioners/kafka/broker/README.md create mode 100644 config/provisioners/kafka/broker/kafka-broker.yaml delete mode 100644 config/provisioners/kafka/clusterprovisioner.yaml delete mode 100644 config/provisioners/kafka/clusterrole.yaml delete mode 100644 config/provisioners/kafka/clusterrolebinding.yaml delete mode 100644 config/provisioners/kafka/kafka-provisioner-config.yaml rename config/provisioners/kafka/{controller.yaml => kafka-provisioner.yaml} (50%) delete mode 100644 config/provisioners/kafka/service-account.yaml diff --git a/config/provisioners/kafka/README.md b/config/provisioners/kafka/README.md new file mode 100644 index 00000000000..ef0f8affd96 --- /dev/null +++ b/config/provisioners/kafka/README.md @@ -0,0 +1,43 @@ +# Apache Kafka - Knative Channel Provisioner + +Deployment steps: +1. Setup [Knative Eventing](../../../DEVELOPMENT.md) +1. Install an Apache Kafka cluster. There are two choices: + * Simple installation of [Apache Kafka](broker). + * A production grade installation using the [Strimzi Kafka Operator](strimzi). + +1. 
Now that Apache Kafka is installed, configure the bus to use the Kafka broker, replace the broker URL if not using the provided broker: + ``` + kubectl create configmap kafka-provisioner-config --from-literal=KAFKA_BOOTSTRAP_SERVERS=kafkabroker.kafka:9092 + ``` + > Note: If you are using Strimzi, the value for the URL is `my-cluster-kafka-bootstrap.kafka:9092`. +1. For cluster wide deployment, change the kind in `config/buses/kafka/kafka-bus.yaml` from `Bus` to `ClusterBus`. +1. Apply the Kafka Bus: + ``` + ko apply -f config/buses/kafka/ + ``` +1. If you want to set the default Knative Bus to Kafka run the following command to edit the Knative Eventing configuration (requires the above change in kind from `Bus` to `ClusterBus`): + ```shell + kubectl get cm flow-controller-config -n knative-eventing -oyaml \ + | sed -e 's/default-cluster-bus: stub/ default-cluster-bus: kafka/' \ + | kubectl replace -f - + ``` +1. Create Channels that reference the 'kafka' Bus +1. (Optional) Install [Kail](https://github.com/boz/kail) - Kubernetes tail + +The bus has an independent provisioner and dispatcher. + +The provisioner will create Kafka topics for each Knative Channel +targeting the Bus (named `<channel>.<namespace>`). +Clients should avoid interacting with topics provisioned by the bus. + +The dispatcher +- receives events via a Channel's Service from inside the cluster and +writes them to the corresponding Kafka topic +- creates a Kafka consumer for each `Subscription`, that reads events +from the subscription's channel and forwards them over HTTP to the +subscriber.
+ +To view logs: +- for the dispatcher `kail -d kafka-[namespace]-bus-dispatcher -c dispatcher` +- for the provisioner `kail -d kafka-[namespace]-bus-provisioner -c provisioner` diff --git a/config/provisioners/kafka/broker/README.md b/config/provisioners/kafka/broker/README.md new file mode 100644 index 00000000000..c8b6da84cbe --- /dev/null +++ b/config/provisioners/kafka/broker/README.md @@ -0,0 +1,13 @@ +# Apache Kafka - simple installation + +1. For an installation of a simple Apache Kafka cluster, a setup is provided: + ``` + kubectl create namespace kafka + kubectl apply -n kafka -f kafka-broker.yaml + ``` + > Note: If you are running Knative on OpenShift you will need to run the following command first to allow the Kafka broker to run as root: + ``` + oc adm policy add-scc-to-user anyuid -z default -n kafka + ``` + +Continue the configuration of Knative Eventing with [step `3`](../). \ No newline at end of file diff --git a/config/provisioners/kafka/broker/kafka-broker.yaml b/config/provisioners/kafka/broker/kafka-broker.yaml new file mode 100644 index 00000000000..469e697a526 --- /dev/null +++ b/config/provisioners/kafka/broker/kafka-broker.yaml @@ -0,0 +1,87 @@ +########################################## KAFKA BROKER ###################################### +# The following does not need to live in the same namespace as the bus.
+--- +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: kafka-broker +spec: + replicas: 1 + template: + metadata: + labels: + app: kafka-broker + spec: + containers: + - name: kafka-broker + image: wurstmeister/kafka:1.1.0 + ports: + - containerPort: 9092 + env: + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: KAFKA_BROKER_ID + value: "0" + - name: KAFKA_LISTENERS + value: "INTERNAL://:9093,EXTERNAL://:9092" + - name: KAFKA_ADVERTISED_LISTENERS + value: "INTERNAL://:9093,EXTERNAL://kafkabroker.$(MY_POD_NAMESPACE):9092" + - name: KAFKA_LISTENER_SECURITY_PROTOCOL_MAP + value: "INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT" + - name: KAFKA_INTER_BROKER_LISTENER_NAME + value: "INTERNAL" + - name: KAFKA_ZOOKEEPER_CONNECT + value: "zookeeper.$(MY_POD_NAMESPACE):2181" + - name: KAFKA_AUTO_CREATE_TOPICS_ENABLE + value: "false" +--- +apiVersion: v1 +kind: Service +metadata: + name: kafkabroker +spec: + type: NodePort + selector: + app: kafka-broker + ports: + - port: 9092 + name: kafka + protocol: TCP +--- +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: zookeeper +spec: + replicas: 1 + template: + metadata: + labels: + app: zookeeper + spec: + containers: + - name: zookeeper + image: wurstmeister/zookeeper:3.4.6 + ports: + - containerPort: 2181 + env: + - name: ZOOKEEPER_ID + value: "1" + - name: ZOOKEEPER_SERVER_1 + value: zookeeper + +--- +apiVersion: v1 +kind: Service +metadata: + name: zookeeper +spec: + selector: + app: zookeeper + ports: + - port: 2181 + name: zookeeper + protocol: TCP + diff --git a/config/provisioners/kafka/clusterprovisioner.yaml b/config/provisioners/kafka/clusterprovisioner.yaml deleted file mode 100644 index 202f9adcab4..00000000000 --- a/config/provisioners/kafka/clusterprovisioner.yaml +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright 2018 The Knative Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in 
compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: eventing.knative.dev/v1alpha1 -kind: ClusterProvisioner -metadata: - name: kafka -spec: - reconciles: - group: eventing.knative.dev - kind: Channel \ No newline at end of file diff --git a/config/provisioners/kafka/clusterrole.yaml b/config/provisioners/kafka/clusterrole.yaml deleted file mode 100644 index 6aa22f158ed..00000000000 --- a/config/provisioners/kafka/clusterrole.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright 2018 The Knative Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: kafka-provisioner -rules: -- apiGroups: ["eventing.knative.dev"] - resources: ["clusterprovisioners"] - verbs: ["get", "watch", "list", "update", "patch"] -- apiGroups: ["eventing.knative.dev"] - resources: ["channels"] - verbs: ["get", "watch", "list", "update", "patch"] -- apiGroups: [""] - resources: ["events"] - verbs: ["create", "patch"] \ No newline at end of file diff --git a/config/provisioners/kafka/clusterrolebinding.yaml b/config/provisioners/kafka/clusterrolebinding.yaml deleted file mode 100644 index d3789e784f8..00000000000 --- a/config/provisioners/kafka/clusterrolebinding.yaml +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright 2018 The Knative Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRoleBinding -metadata: - name: kafka-provisioner-manage -subjects: - - kind: ServiceAccount - name: kafka-provisioner - namespace: knative-eventing -roleRef: - kind: ClusterRole - name: kafka-provisioner - apiGroup: rbac.authorization.k8s.io \ No newline at end of file diff --git a/config/provisioners/kafka/kafka-provisioner-config.yaml b/config/provisioners/kafka/kafka-provisioner-config.yaml deleted file mode 100644 index cf27fe525fd..00000000000 --- a/config/provisioners/kafka/kafka-provisioner-config.yaml +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright 2018 The Knative Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -apiVersion: v1 -kind: ConfigMap -metadata: - name: kafka-provisioner-config - namespace: knative-eventing -data: - # Name of the provisioner that this controller represents - cluster-provisioner-name: kafka - # Broker URL's for the provisioner - brokers: kafkabroker.kafka:9092 \ No newline at end of file diff --git a/config/provisioners/kafka/controller.yaml b/config/provisioners/kafka/kafka-provisioner.yaml similarity index 50% rename from config/provisioners/kafka/controller.yaml rename to config/provisioners/kafka/kafka-provisioner.yaml index e8fa2b9b522..89b95c41c9e 100644 --- a/config/provisioners/kafka/controller.yaml +++ b/config/provisioners/kafka/kafka-provisioner.yaml @@ -12,6 +12,59 @@ # See the License for the specific language governing permissions and # limitations under the License. +apiVersion: eventing.knative.dev/v1alpha1 +kind: ClusterProvisioner +metadata: + name: kafka +spec: + reconciles: + group: eventing.knative.dev + kind: Channel +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kafka-provisioner + namespace: knative-eventing +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kafka-provisioner +rules: +- apiGroups: ["eventing.knative.dev"] + resources: ["clusterprovisioners", "channels"] + verbs: ["get", "watch", "list", "update"] +--- + +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: kafka-provisioner-manage +subjects: + - kind: ServiceAccount + name: kafka-provisioner + namespace: knative-eventing +roleRef: + kind: ClusterRole + name: kafka-provisioner + apiGroup: rbac.authorization.k8s.io +--- + +apiVersion: v1 +kind: ConfigMap +metadata: + name: kafka-provisioner-config + namespace: knative-eventing +data: + # Name of the provisioner that this controller represents + cluster-provisioner-name: kafka + # Broker URL's for the provisioner + brokers: kafkabroker.kafka:9092 +--- + apiVersion: apps/v1beta1 kind: Deployment metadata: @@ -28,14 
+81,10 @@ spec: containers: - name: kafka-provisioner-controller image: github.com/knative/eventing/pkg/provisioners/kafka - args: [ - "-logtostderr", - "-stderrthreshold", "INFO", - ] volumeMounts: - name: kafka-provisioner-config mountPath: /etc/config-provisioner volumes: - name: kafka-provisioner-config configMap: - name: kafka-provisioner-config \ No newline at end of file + name: kafka-provisioner-config diff --git a/config/provisioners/kafka/service-account.yaml b/config/provisioners/kafka/service-account.yaml deleted file mode 100644 index bacb3e751e0..00000000000 --- a/config/provisioners/kafka/service-account.yaml +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright 2018 The Knative Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: kafka-provisioner - namespace: knative-eventing \ No newline at end of file diff --git a/pkg/apis/eventing/v1alpha1/cluster_provisioner_types.go b/pkg/apis/eventing/v1alpha1/cluster_provisioner_types.go index cf7596357ab..4dd79b30ea6 100644 --- a/pkg/apis/eventing/v1alpha1/cluster_provisioner_types.go +++ b/pkg/apis/eventing/v1alpha1/cluster_provisioner_types.go @@ -66,7 +66,7 @@ type ClusterProvisionerSpec struct { Reconciles metav1.GroupKind `json:"reconciles"` } -var cProvCondSet = duckv1alpha1.NewLivingConditionSet(ClusterProvisionerConditionProvisionerReady) +var cProvCondSet = duckv1alpha1.NewLivingConditionSet() // ClusterProvisionerStatus is the status for a ClusterProvisioner resource type ClusterProvisionerStatus struct { @@ -84,11 +84,8 @@ type ClusterProvisionerStatus struct { const ( - // ClusterProvisionerConditionReady has status True when all subconditions below have been set to True. + // ClusterProvisionerConditionReady has status True when provisioner is ready to provision backing resource. ClusterProvisionerConditionReady = duckv1alpha1.ConditionReady - - // ClusterProvisionerConditionProvisionerReady has status True when the provisioner is ready - ClusterProvisionerConditionProvisionerReady duckv1alpha1.ConditionType = "ProvisionerReady" ) // GetCondition returns the condition currently associated with the given type, or nil. @@ -102,13 +99,13 @@ func (ps *ClusterProvisionerStatus) IsReady() bool { } // MarkProvisionerReady sets the condition that the provisioner is ready to provision backing resource. -func (ps *ClusterProvisionerStatus) MarkProvisionerReady() { - cProvCondSet.Manage(ps).MarkTrue(ClusterProvisionerConditionProvisionerReady) +func (ps *ClusterProvisionerStatus) MarkReady() { + cProvCondSet.Manage(ps).MarkTrue(ClusterProvisionerConditionReady) } // MarkProvisionerNotReady sets the condition that the provisioner is not ready to provision backing resource. 
-func (ps *ClusterProvisionerStatus) MarkProvisionerNotReady(reason, messageFormat string, messageA ...interface{}) { - cProvCondSet.Manage(ps).MarkFalse(ClusterProvisionerConditionProvisionerReady, reason, messageFormat, messageA...) +func (ps *ClusterProvisionerStatus) MarkNotReady(reason, messageFormat string, messageA ...interface{}) { + cProvCondSet.Manage(ps).MarkFalse(ClusterProvisionerConditionReady, reason, messageFormat, messageA...) } // InitializeConditions sets relevant unset conditions to Unknown state. diff --git a/pkg/apis/eventing/v1alpha1/cluster_provisioner_types_test.go b/pkg/apis/eventing/v1alpha1/cluster_provisioner_types_test.go index 0eef70ce458..0d86d2dd5de 100644 --- a/pkg/apis/eventing/v1alpha1/cluster_provisioner_types_test.go +++ b/pkg/apis/eventing/v1alpha1/cluster_provisioner_types_test.go @@ -43,21 +43,21 @@ func TestClusterProvisionerStatusIsReady(t *testing.T) { want: false, }, { name: "ready true condition", - ps: &ClusterProvisionerStatus{ - Conditions: []duckv1alpha1.Condition{{ - Type: ChannelConditionReady, - Status: corev1.ConditionTrue, - }}, - }, + ps: func() *ClusterProvisionerStatus { + ps := &ClusterProvisionerStatus{} + ps.InitializeConditions() + ps.MarkReady() + return ps + }(), want: true, }, { name: "ready false condition", - ps: &ClusterProvisionerStatus{ - Conditions: []duckv1alpha1.Condition{{ - Type: ChannelConditionReady, - Status: corev1.ConditionFalse, - }}, - }, + ps: func() *ClusterProvisionerStatus { + ps := &ClusterProvisionerStatus{} + ps.InitializeConditions() + ps.MarkNotReady("Not Ready", "testing") + return ps + }(), want: false, }, { name: "unknown condition", @@ -68,26 +68,7 @@ func TestClusterProvisionerStatusIsReady(t *testing.T) { }}, }, want: false, - }, { - name: "mark provisioner ready", - ps: func() *ClusterProvisionerStatus { - ps := &ClusterProvisionerStatus{} - ps.InitializeConditions() - ps.MarkProvisionerReady() - return ps - }(), - want: true, - }, - { - name: "mark provisioner not 
ready", - ps: func() *ClusterProvisionerStatus { - ps := &ClusterProvisionerStatus{} - ps.InitializeConditions() - ps.MarkProvisionerNotReady("Not Ready", "testing") - return ps - }(), - want: false, - }} + }} for _, test := range tests { t.Run(test.name, func(t *testing.T) { diff --git a/pkg/provisioners/kafka/controller/channel/provider.go b/pkg/provisioners/kafka/controller/channel/provider.go index 8fcab75de36..2025cdb25e4 100644 --- a/pkg/provisioners/kafka/controller/channel/provider.go +++ b/pkg/provisioners/kafka/controller/channel/provider.go @@ -18,7 +18,6 @@ package channel import ( "github.com/go-logr/logr" - "k8s.io/client-go/rest" "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -38,11 +37,10 @@ const ( ) type reconciler struct { - client client.Client - restConfig *rest.Config - recorder record.EventRecorder - log logr.Logger - config *common.KafkaProvisionerConfig + client client.Client + recorder record.EventRecorder + log logr.Logger + config *common.KafkaProvisionerConfig } // Verify the struct implements reconcile.Reconciler @@ -74,8 +72,3 @@ func (r *reconciler) InjectClient(c client.Client) error { r.client = c return nil } - -func (r *reconciler) InjectConfig(c *rest.Config) error { - r.restConfig = c - return nil -} diff --git a/pkg/provisioners/kafka/controller/channel/reconcile.go b/pkg/provisioners/kafka/controller/channel/reconcile.go index 19925bb4003..e4efe45dbec 100644 --- a/pkg/provisioners/kafka/controller/channel/reconcile.go +++ b/pkg/provisioners/kafka/controller/channel/reconcile.go @@ -47,11 +47,33 @@ func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, err return reconcile.Result{}, err } + // Skip Channel as it is not targeting any provisioner + if channel.Spec.Provisioner == nil || channel.Spec.Provisioner.Ref == nil { + return reconcile.Result{}, nil + } + + // Skip channel not managed by this provisioner + provisionerRef 
:= channel.Spec.Provisioner.Ref + clusterProvisioner, err := r.getClusterProvisioner() + if err != nil { + return reconcile.Result{}, err + } + + if provisionerRef.Name != clusterProvisioner.Name || provisionerRef.Namespace != clusterProvisioner.Namespace { + return reconcile.Result{}, nil + } + original := channel.DeepCopy() - // Reconcile this copy of the Channel and then write back any status - // updates regardless of whether the reconcile error out. - err = r.reconcile(channel) + if clusterProvisioner.Status.IsReady() { + // Reconcile this copy of the Channel and then write back any status + // updates regardless of whether the reconcile error out. + err = r.reconcile(channel) + } else { + channel.Status.MarkNotProvisioned("NotProvisioned", "ClusterProvisioner %s is not ready", clusterProvisioner.Name) + err = fmt.Errorf("ClusterProvisioner %s is not ready", clusterProvisioner.Name) + } + if !equality.Semantic.DeepEqual(original.Status, channel.Status) { // If we didn't change anything then don't call updateStatus. // This is important because the copy we loaded from the informer's @@ -81,31 +103,9 @@ func (r *reconciler) reconcile(channel *v1alpha1.Channel) error { return nil } - // Skip Channel as it is not targeting any provisioner - if channel.Spec.Provisioner == nil || channel.Spec.Provisioner.Ref == nil { - return nil - } - - // Skip channel not managed by this provisioner - provisionerRef := channel.Spec.Provisioner.Ref - clusterProvisioner, err := r.getClusterProvisioner() - if err != nil { - return err - } - // TODO: Is there a better way to compare ref? 
- if provisionerRef.Name != clusterProvisioner.Name || provisionerRef.Namespace != clusterProvisioner.Namespace { - return nil - } - - // The provisioner must be ready - if !clusterProvisioner.Status.IsReady() { - channel.Status.MarkAsNotProvisioned("NotProvisioned", "ClusterProvisioner %s is not ready", clusterProvisioner.Name) - return fmt.Errorf("ClusterProvisioner %s is not ready", clusterProvisioner.Name) - } - // TODO: provision channel channel.Status.InitializeConditions() - channel.Status.MarkAsNotProvisioned("NotProvisioned", "NotImplemented") + channel.Status.MarkNotProvisioned("NotProvisioned", "NotImplemented") return nil } @@ -118,7 +118,6 @@ func (r *reconciler) getClusterProvisioner() (*v1alpha1.ClusterProvisioner, erro return nil, err } return clusterProvisioner, nil - } func (r *reconciler) updateStatus(channel *v1alpha1.Channel) (*v1alpha1.Channel, error) { diff --git a/pkg/provisioners/kafka/controller/channel/reconcile_test.go b/pkg/provisioners/kafka/controller/channel/reconcile_test.go index 8b0198e2fc9..a5b825757ef 100644 --- a/pkg/provisioners/kafka/controller/channel/reconcile_test.go +++ b/pkg/provisioners/kafka/controller/channel/reconcile_test.go @@ -26,7 +26,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/kubernetes/scheme" - "k8s.io/client-go/rest" "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -160,11 +159,10 @@ func TestAllCases(t *testing.T) { for _, tc := range testCases { c := tc.GetClient() r := &reconciler{ - client: c, - restConfig: &rest.Config{}, - recorder: recorder, - log: log, - config: getControllerConfig(), + client: c, + recorder: recorder, + log: log, + config: getControllerConfig(), } t.Logf("Running test %s", tc.Name) t.Run(tc.Name, tc.Runner(t, r, c)) @@ -249,10 +247,6 @@ func getNewClusterProvisioner(name string, isReady bool) *eventingv1alpha1.Clust }, Status: 
eventingv1alpha1.ClusterProvisionerStatus{ Conditions: []duckv1alpha1.Condition{ - { - Type: eventingv1alpha1.ClusterProvisionerConditionProvisionerReady, - Status: condStatus, - }, { Type: eventingv1alpha1.ClusterProvisionerConditionReady, Status: condStatus, diff --git a/pkg/provisioners/kafka/controller/provider.go b/pkg/provisioners/kafka/controller/provider.go index 77e387bb0da..781006395cb 100644 --- a/pkg/provisioners/kafka/controller/provider.go +++ b/pkg/provisioners/kafka/controller/provider.go @@ -18,7 +18,6 @@ package controller import ( "github.com/go-logr/logr" - "k8s.io/client-go/rest" "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -37,11 +36,10 @@ const ( ) type reconciler struct { - client client.Client - restConfig *rest.Config - recorder record.EventRecorder - log logr.Logger - config *KafkaProvisionerConfig + client client.Client + recorder record.EventRecorder + log logr.Logger + config *KafkaProvisionerConfig } // Verify the struct implements reconcile.Reconciler @@ -73,8 +71,3 @@ func (r *reconciler) InjectClient(c client.Client) error { r.client = c return nil } - -func (r *reconciler) InjectConfig(c *rest.Config) error { - r.restConfig = c - return nil -} diff --git a/pkg/provisioners/kafka/controller/reconcile.go b/pkg/provisioners/kafka/controller/reconcile.go index fdfe0747314..973b7808af2 100644 --- a/pkg/provisioners/kafka/controller/reconcile.go +++ b/pkg/provisioners/kafka/controller/reconcile.go @@ -48,6 +48,16 @@ func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, err return reconcile.Result{}, err } + // Skip channel provisioners that we don't manage + if provisioner.Name != r.config.Name { + return reconcile.Result{}, nil + } + + // Only reconcile channel provisioners + if provisioner.Spec.Reconciles.Group != eventing.GroupName || provisioner.Spec.Reconciles.Kind != "Channel" { + return reconcile.Result{}, nil + } + original 
:= provisioner.DeepCopy() // Reconcile this copy of the Provisioner and then write back any status @@ -81,19 +91,9 @@ func (r *reconciler) reconcile(provisioner *v1alpha1.ClusterProvisioner) error { return nil } - // Skip channel provisioners that we don't manage - if provisioner.Name != r.config.Name { - return nil - } - - // Only reconcile channel provisioners - if provisioner.Spec.Reconciles.Group != eventing.GroupName || provisioner.Spec.Reconciles.Kind != "Channel" { - return nil - } - provisioner.Status.InitializeConditions() // Update Status as Ready - provisioner.Status.MarkProvisionerReady() + provisioner.Status.MarkReady() return nil } diff --git a/pkg/provisioners/kafka/controller/reconcile_test.go b/pkg/provisioners/kafka/controller/reconcile_test.go index bed366bf73b..fb0710774d1 100644 --- a/pkg/provisioners/kafka/controller/reconcile_test.go +++ b/pkg/provisioners/kafka/controller/reconcile_test.go @@ -26,7 +26,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/kubernetes/scheme" - "k8s.io/client-go/rest" "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -56,11 +55,6 @@ var ClusterProvisionerConditionReady = duckv1alpha1.Condition{ Status: corev1.ConditionTrue, } -var ClusterProvisionerConditionProvisionerReady = duckv1alpha1.Condition{ - Type: eventingv1alpha1.ClusterProvisionerConditionProvisionerReady, - Status: corev1.ConditionTrue, -} - var mockFetchError = controllertesting.Mocks{ MockGets: []controllertesting.MockGet{ func(innerClient client.Client, ctx context.Context, key client.ObjectKey, obj runtime.Object) (controllertesting.MockHandled, error) { @@ -146,11 +140,10 @@ func TestAllCases(t *testing.T) { for _, tc := range testCases { c := tc.GetClient() r := &reconciler{ - client: c, - restConfig: &rest.Config{}, - recorder: recorder, - log: log, - config: getControllerConfig(), + client: c, + recorder: 
recorder, + log: log, + config: getControllerConfig(), } t.Logf("Running test %s", tc.Name) t.Run(tc.Name, tc.Runner(t, r, c)) @@ -181,7 +174,6 @@ func GetNewChannelClusterProvisionerReady(name string) *eventingv1alpha1.Cluster c := GetNewChannelClusterProvisioner(name) c.Status = eventingv1alpha1.ClusterProvisionerStatus{ Conditions: []duckv1alpha1.Condition{ - ClusterProvisionerConditionProvisionerReady, ClusterProvisionerConditionReady, }, } From 514773c2d085595bb9c45301b7cd8fc65b40a66f Mon Sep 17 00:00:00 2001 From: Sabari Kumar Murugesan Date: Mon, 15 Oct 2018 13:53:46 -0700 Subject: [PATCH 07/20] Remove unfinished README --- config/provisioners/kafka/README.md | 43 ----------------------------- 1 file changed, 43 deletions(-) delete mode 100644 config/provisioners/kafka/README.md diff --git a/config/provisioners/kafka/README.md b/config/provisioners/kafka/README.md deleted file mode 100644 index ef0f8affd96..00000000000 --- a/config/provisioners/kafka/README.md +++ /dev/null @@ -1,43 +0,0 @@ -# Apache Kafka - Knative Channel Provisioner - -Deployment steps: -1. Setup [Knative Eventing](../../../DEVELOPMENT.md) -1. Install an Apache Kafka cluster. There are two choices: - * Simple installation of [Apache Kafka](broker). - * A production grade installation using the [Strimzi Kafka Operator](strimzi). - -1. Now that the Apache Kafka is installed, configure the bus to use the Kafka broker, replace the broker URL if not using the provided broker: - ``` - kubectl create configmap kafka-provisioner-config --from-literal=KAFKA_BOOTSTRAP_SERVERS=kafkabroker.kafka:9092 - ``` - > Note: If you are using Strimzi, the value for the URL is `my-cluster-kafka-bootstrap.kafka.9092`. -1. For cluster wide deployment, change the kind in `config/buses/kafka/kafka-bus.yaml` from `Bus` to `ClusterBus`. -1. Apply the Kafka Bus: - ``` - ko apply -f config/buses/kafka/ - ``` -1. 
If you want to set the default Knative Bus to Kafka run the following command to edit the Knative Eventing configuration (requires the above change in kind from `Bus` to `ClusterBus`): - ```shell - kubectl get cm flow-controller-config -n knative-eventing -oyaml \ - | sed -e 's/default-cluster-bus: stub/ default-cluster-bus: kafka/' \ - | kubectl replace -f - - ``` -1. Create Channels that reference the 'kafka' Bus -1. (Optional) Install [Kail](https://github.com/boz/kail) - Kubernetes tail - -The bus has an independent provisioner and dispatcher. - -The provisioner will create Kafka topics for each Knative Channel -targeting the Bus (named `.`. -Clients should avoid interacting with topics provisioned by the bus. - -The dispatcher -- receives events via a Channel's Service from inside the cluster and -writes them to the corresponding Kafka topic -- creates a Kafka consumer for each `Subscription`, that reads events -from the subscription's channel and forwards them over HTTP to the -subscriber. 
- -To view logs: -- for the dispatcher `kail -d kafka-[namespace]-bus-dispatcher -c dispatcher` -- for the provisioner `kail -d kafka-[namespace]-bus-provisioner -c provisioner` From 8f83d7caa85e41f316d183fe802f9164f9993477 Mon Sep 17 00:00:00 2001 From: Sabari Kumar Murugesan Date: Mon, 15 Oct 2018 14:35:16 -0700 Subject: [PATCH 08/20] Switch from logr to zap --- .../kafka/controller/channel/provider.go | 8 +-- .../kafka/controller/channel/reconcile.go | 13 ++-- .../controller/channel/reconcile_test.go | 9 +-- pkg/provisioners/kafka/controller/provider.go | 8 +-- .../kafka/controller/reconcile.go | 13 ++-- .../kafka/controller/reconcile_test.go | 9 +-- pkg/provisioners/kafka/main.go | 19 +++--- pkg/provisioners/logging.go | 63 +++++++++++++++++++ 8 files changed, 101 insertions(+), 41 deletions(-) create mode 100644 pkg/provisioners/logging.go diff --git a/pkg/provisioners/kafka/controller/channel/provider.go b/pkg/provisioners/kafka/controller/channel/provider.go index 2025cdb25e4..4a661e58e90 100644 --- a/pkg/provisioners/kafka/controller/channel/provider.go +++ b/pkg/provisioners/kafka/controller/channel/provider.go @@ -17,7 +17,7 @@ limitations under the License. package channel import ( - "github.com/go-logr/logr" + "go.uber.org/zap" "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -39,7 +39,7 @@ const ( type reconciler struct { client client.Client recorder record.EventRecorder - log logr.Logger + logger *zap.Logger config *common.KafkaProvisionerConfig } @@ -47,12 +47,12 @@ type reconciler struct { var _ reconcile.Reconciler = &reconciler{} // ProvideController returns a Channel controller. 
-func ProvideController(mgr manager.Manager, config *common.KafkaProvisionerConfig, log logr.Logger) (controller.Controller, error) { +func ProvideController(mgr manager.Manager, config *common.KafkaProvisionerConfig, logger *zap.Logger) (controller.Controller, error) { // Setup a new controller to Reconcile Channel. c, err := controller.New(controllerAgentName, mgr, controller.Options{ Reconciler: &reconciler{ recorder: mgr.GetRecorder(controllerAgentName), - log: log, + logger: logger, config: config, }, }) diff --git a/pkg/provisioners/kafka/controller/channel/reconcile.go b/pkg/provisioners/kafka/controller/channel/reconcile.go index e4efe45dbec..9180aa02dad 100644 --- a/pkg/provisioners/kafka/controller/channel/reconcile.go +++ b/pkg/provisioners/kafka/controller/channel/reconcile.go @@ -27,23 +27,24 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" + "go.uber.org/zap" ) // Reconcile compares the actual state with the desired, and attempts to // converge the two. It then updates the Status block of the Channel resource // with the current status of the resource. 
func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, error) { - r.log.Info("Reconciling channel", "request", request) + r.logger.Info("Reconciling channel", zap.Any("request", request)) channel := &v1alpha1.Channel{} err := r.client.Get(context.TODO(), request.NamespacedName, channel) if errors.IsNotFound(err) { - r.log.Info("could not find channel", "request", request) + r.logger.Info("could not find channel", zap.Any("request", request)) return reconcile.Result{}, nil } if err != nil { - r.log.Error(err, "could not fetch Channel", "request", request) + r.logger.Error("could not fetch channel", zap.Error(err)) return reconcile.Result{}, err } @@ -80,7 +81,7 @@ func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, err // cache may be stale and we don't want to overwrite a prior update // to status with this stale state. if _, err := r.updateStatus(channel); err != nil { - r.log.Info("Failed to update channel status", "error", err) + r.logger.Info("failed to update channel status", zap.Error(err)) return reconcile.Result{}, err } } @@ -93,12 +94,12 @@ func (r *reconciler) reconcile(channel *v1alpha1.Channel) error { // See if the channel has been deleted accessor, err := meta.Accessor(channel) if err != nil { - r.log.Info("failed to get metadata", "error", err) + r.logger.Info("failed to get metadata", zap.Error(err)) return err } deletionTimestamp := accessor.GetDeletionTimestamp() if deletionTimestamp != nil { - r.log.Info(fmt.Sprintf("DeletionTimestamp: %v", deletionTimestamp)) + r.logger.Info(fmt.Sprintf("DeletionTimestamp: %v", deletionTimestamp)) //TODO: Handle deletion return nil } diff --git a/pkg/provisioners/kafka/controller/channel/reconcile_test.go b/pkg/provisioners/kafka/controller/channel/reconcile_test.go index a5b825757ef..d722615e999 100644 --- a/pkg/provisioners/kafka/controller/channel/reconcile_test.go +++ b/pkg/provisioners/kafka/controller/channel/reconcile_test.go @@ -29,18 +29,14 @@ import ( 
"k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" - logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" "github.com/knative/eventing/pkg/apis/eventing" eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" controllertesting "github.com/knative/eventing/pkg/controller/testing" + "github.com/knative/eventing/pkg/provisioners" "github.com/knative/eventing/pkg/provisioners/kafka/controller" ) -var ( - log = logf.Log.WithName("testing") -) - const ( channelName = "test-channel" clusterProvisionerName = "kafka" @@ -158,10 +154,11 @@ func TestAllCases(t *testing.T) { for _, tc := range testCases { c := tc.GetClient() + logger := provisioners.NewProvisionerLoggerFromConfig(provisioners.NewLoggingConfig()) r := &reconciler{ client: c, recorder: recorder, - log: log, + logger: logger.Desugar(), config: getControllerConfig(), } t.Logf("Running test %s", tc.Name) diff --git a/pkg/provisioners/kafka/controller/provider.go b/pkg/provisioners/kafka/controller/provider.go index 781006395cb..8cfb91eb68a 100644 --- a/pkg/provisioners/kafka/controller/provider.go +++ b/pkg/provisioners/kafka/controller/provider.go @@ -17,7 +17,7 @@ limitations under the License. package controller import ( - "github.com/go-logr/logr" + "go.uber.org/zap" "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -38,7 +38,7 @@ const ( type reconciler struct { client client.Client recorder record.EventRecorder - log logr.Logger + logger *zap.Logger config *KafkaProvisionerConfig } @@ -46,12 +46,12 @@ type reconciler struct { var _ reconcile.Reconciler = &reconciler{} // ProvideController returns a Provisioner controller. 
-func ProvideController(mgr manager.Manager, config *KafkaProvisionerConfig, log logr.Logger) (controller.Controller, error) { +func ProvideController(mgr manager.Manager, config *KafkaProvisionerConfig, logger *zap.Logger) (controller.Controller, error) { // Setup a new controller to Reconcile Provisioners. c, err := controller.New(controllerAgentName, mgr, controller.Options{ Reconciler: &reconciler{ recorder: mgr.GetRecorder(controllerAgentName), - log: log, + logger: logger, config: config, }, }) diff --git a/pkg/provisioners/kafka/controller/reconcile.go b/pkg/provisioners/kafka/controller/reconcile.go index 973b7808af2..3fbe31a3280 100644 --- a/pkg/provisioners/kafka/controller/reconcile.go +++ b/pkg/provisioners/kafka/controller/reconcile.go @@ -28,23 +28,24 @@ import ( "github.com/knative/eventing/pkg/apis/eventing" "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" + "go.uber.org/zap" ) // Reconcile compares the actual state with the desired, and attempts to // converge the two. It then updates the Status block of the Provisioner resource // with the current status of the resource. 
func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, error) { - r.log.Info("reconciling ClusterProvisioner", "request", request) + r.logger.Info("reconciling ClusterProvisioner", zap.Any("request", request)) provisioner := &v1alpha1.ClusterProvisioner{} err := r.client.Get(context.TODO(), request.NamespacedName, provisioner) if errors.IsNotFound(err) { - r.log.Info("could not find ClusterProvisioner", "request", request) + r.logger.Info("could not find ClusterProvisioner", zap.Any("request", request)) return reconcile.Result{}, nil } if err != nil { - r.log.Error(err, "could not fetch ClusterProvisioner", "request", request) + r.logger.Error("could not fetch ClusterProvisioner", zap.Error(err)) return reconcile.Result{}, err } @@ -69,7 +70,7 @@ func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, err // cache may be stale and we don't want to overwrite a prior update // to status with this stale state. if _, err := r.updateStatus(provisioner); err != nil { - r.log.Info("failed to update Provisioner status", "error", err) + r.logger.Info("failed to update Provisioner status", zap.Error(err)) return reconcile.Result{}, err } } @@ -82,12 +83,12 @@ func (r *reconciler) reconcile(provisioner *v1alpha1.ClusterProvisioner) error { // See if the provisioner has been deleted accessor, err := meta.Accessor(provisioner) if err != nil { - r.log.Info("failed to get metadata", "error", err) + r.logger.Info("failed to get metadata", zap.Error(err)) return err } deletionTimestamp := accessor.GetDeletionTimestamp() if deletionTimestamp != nil { - r.log.Info(fmt.Sprintf("DeletionTimestamp: %v", deletionTimestamp)) + r.logger.Info(fmt.Sprintf("DeletionTimestamp: %v", deletionTimestamp)) return nil } diff --git a/pkg/provisioners/kafka/controller/reconcile_test.go b/pkg/provisioners/kafka/controller/reconcile_test.go index fb0710774d1..fda1839a6c1 100644 --- a/pkg/provisioners/kafka/controller/reconcile_test.go +++ 
b/pkg/provisioners/kafka/controller/reconcile_test.go @@ -29,15 +29,11 @@ import ( "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" - logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" "github.com/knative/eventing/pkg/apis/eventing" eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" controllertesting "github.com/knative/eventing/pkg/controller/testing" -) - -var ( - log = logf.Log.WithName("testing") + "github.com/knative/eventing/pkg/provisioners" ) const ( @@ -139,10 +135,11 @@ func TestAllCases(t *testing.T) { for _, tc := range testCases { c := tc.GetClient() + logger := provisioners.NewProvisionerLoggerFromConfig(provisioners.NewLoggingConfig()) r := &reconciler{ client: c, recorder: recorder, - log: log, + logger: logger.Desugar(), config: getControllerConfig(), } t.Logf("Running test %s", tc.Name) diff --git a/pkg/provisioners/kafka/main.go b/pkg/provisioners/kafka/main.go index 61609d069a2..db1e7c64fb2 100644 --- a/pkg/provisioners/kafka/main.go +++ b/pkg/provisioners/kafka/main.go @@ -6,7 +6,7 @@ import ( "os" "strings" - "github.com/go-logr/logr" + "go.uber.org/zap" "k8s.io/apimachinery/pkg/runtime" _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" "sigs.k8s.io/controller-runtime/pkg/client/config" @@ -16,6 +16,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/runtime/signals" "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" + "github.com/knative/eventing/pkg/provisioners" provisionerController "github.com/knative/eventing/pkg/provisioners/kafka/controller" "github.com/knative/eventing/pkg/provisioners/kafka/controller/channel" "github.com/knative/pkg/configmap" @@ -26,23 +27,23 @@ const ( BrokerConfigMapKey = "brokers" ) -var log = logf.Log.WithName("kafka-provisioner") - // SchemeFunc adds types to a Scheme. type SchemeFunc func(*runtime.Scheme) error // ProvideFunc adds a controller to a Manager. 
-type ProvideFunc func(mgr manager.Manager, config *provisionerController.KafkaProvisionerConfig, log logr.Logger) (controller.Controller, error) +type ProvideFunc func(mgr manager.Manager, config *provisionerController.KafkaProvisionerConfig, logger *zap.Logger) (controller.Controller, error) func main() { flag.Parse() logf.SetLogger(logf.ZapLogger(false)) - entryLog := log.WithName("entrypoint") + + logger := provisioners.NewProvisionerLoggerFromConfig(provisioners.NewLoggingConfig()) + defer logger.Sync() // Setup a Manager mgr, err := manager.New(config.GetConfigOrDie(), manager.Options{}) if err != nil { - entryLog.Error(err, "unable to run controller manager") + logger.Error(err, "unable to run controller manager") os.Exit(1) } @@ -64,13 +65,13 @@ func main() { provisionerConfig, err := getProvisionerConfig() if err != nil { - entryLog.Error(err, "unable to run controller manager") + logger.Error(err, "unable to run controller manager") os.Exit(1) } for _, provider := range providers { - if _, err := provider(mgr, provisionerConfig, log); err != nil { - entryLog.Error(err, "unable to run controller manager") + if _, err := provider(mgr, provisionerConfig, logger.Desugar()); err != nil { + logger.Error(err, "unable to run controller manager") os.Exit(1) } } diff --git a/pkg/provisioners/logging.go b/pkg/provisioners/logging.go new file mode 100644 index 00000000000..46dc9830151 --- /dev/null +++ b/pkg/provisioners/logging.go @@ -0,0 +1,63 @@ +/* + * Copyright 2018 The Knative Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package provisioners + +import ( + "github.com/knative/pkg/logging" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +const ( + provisionerLoggingComponent = "provisioner" +) + +// NewLoggingConfig creates a static logging configuration appropriate for a +// provisioner. All logging levels are set to Info. +func NewLoggingConfig() *logging.Config { + lc := &logging.Config{} + lc.LoggingConfig = `{ + "level": "info", + "development": false, + "outputPaths": ["stdout"], + "errorOutputPaths": ["stderr"], + "encoding": "json", + "encoderConfig": { + "timeKey": "ts", + "levelKey": "level", + "nameKey": "logger", + "callerKey": "caller", + "messageKey": "msg", + "stacktraceKey": "stacktrace", + "lineEnding": "", + "levelEncoder": "", + "timeEncoder": "iso8601", + "durationEncoder": "", + "callerEncoder": "" + } + }` + lc.LoggingLevel = make(map[string]zapcore.Level) + lc.LoggingLevel[provisionerLoggingComponent] = zapcore.InfoLevel + return lc +} + +// NewProvisionerLoggerFromConfig creates a new zap logger for the provisioner component based +// on the provided configuration +func NewProvisionerLoggerFromConfig(config *logging.Config) *zap.SugaredLogger { + logger, _ := logging.NewLoggerFromConfig(config, provisionerLoggingComponent) + return logger +} From 356da32f7df60d42eb9910277f79760926651097 Mon Sep 17 00:00:00 2001 From: Sabari Kumar Murugesan Date: Wed, 10 Oct 2018 14:29:34 -0700 Subject: [PATCH 09/20] Provision Channel as Kafka Topic --- .../kafka/controller/channel/provider.go | 31 ++- .../kafka/controller/channel/reconcile.go | 64 +++++- .../controller/channel/reconcile_test.go | 197 ++++++++++++++++-- 3 files changed, 262 insertions(+), 30 deletions(-) diff --git a/pkg/provisioners/kafka/controller/channel/provider.go b/pkg/provisioners/kafka/controller/channel/provider.go index 4a661e58e90..3aead83c082 100644 --- 
a/pkg/provisioners/kafka/controller/channel/provider.go +++ b/pkg/provisioners/kafka/controller/channel/provider.go @@ -17,6 +17,9 @@ limitations under the License. package channel import ( + "fmt" + + "github.com/Shopify/sarama" "go.uber.org/zap" "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" @@ -37,10 +40,11 @@ const ( ) type reconciler struct { - client client.Client - recorder record.EventRecorder - logger *zap.Logger - config *common.KafkaProvisionerConfig + client client.Client + recorder record.EventRecorder + logger *zap.Logger + config *common.KafkaProvisionerConfig + kafkaClusterAdmin sarama.ClusterAdmin } // Verify the struct implements reconcile.Reconciler @@ -49,11 +53,16 @@ var _ reconcile.Reconciler = &reconciler{} // ProvideController returns a Channel controller. func ProvideController(mgr manager.Manager, config *common.KafkaProvisionerConfig, logger *zap.Logger) (controller.Controller, error) { // Setup a new controller to Reconcile Channel. 
+ clusterAdmin, err := getKafkaAdminClient(config) + if err != nil { + return nil, fmt.Errorf("unable to build kafka admin client: %s", err) + } c, err := controller.New(controllerAgentName, mgr, controller.Options{ Reconciler: &reconciler{ - recorder: mgr.GetRecorder(controllerAgentName), - logger: logger, - config: config, + recorder: mgr.GetRecorder(controllerAgentName), + logger: logger, + config: config, + kafkaClusterAdmin: clusterAdmin, }, }) if err != nil { @@ -68,6 +77,14 @@ func ProvideController(mgr manager.Manager, config *common.KafkaProvisionerConfi return c, nil } +func getKafkaAdminClient(config *common.KafkaProvisionerConfig) (sarama.ClusterAdmin, error) { + saramaConf := sarama.NewConfig() + saramaConf.Version = sarama.V1_1_0_0 + saramaConf.ClientID = controllerAgentName + + return sarama.NewClusterAdmin(config.Brokers, saramaConf) +} + func (r *reconciler) InjectClient(c client.Client) error { r.client = c return nil diff --git a/pkg/provisioners/kafka/controller/channel/reconcile.go b/pkg/provisioners/kafka/controller/channel/reconcile.go index 9180aa02dad..c11ea8441c9 100644 --- a/pkg/provisioners/kafka/controller/channel/reconcile.go +++ b/pkg/provisioners/kafka/controller/channel/reconcile.go @@ -20,6 +20,8 @@ import ( "context" "fmt" + "github.com/Shopify/sarama" + "github.com/ghodss/yaml" "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" @@ -30,6 +32,11 @@ import ( "go.uber.org/zap" ) +const ( + ArgumentNumPartitions = "NumPartitions" + DefaultNumPartitions = 1 +) + // Reconcile compares the actual state with the desired, and attempts to // converge the two. It then updates the Status block of the Channel resource // with the current status of the resource. 
@@ -104,12 +111,50 @@ func (r *reconciler) reconcile(channel *v1alpha1.Channel) error { return nil } - // TODO: provision channel channel.Status.InitializeConditions() - channel.Status.MarkNotProvisioned("NotProvisioned", "NotImplemented") + if err := r.provisionChannel(channel); err != nil { + channel.Status.MarkNotProvisioned("NotProvisioned", "error while provisioning: %s", err) + return err + } + channel.Status.MarkProvisioned() return nil } +func (r *reconciler) provisionChannel(channel *v1alpha1.Channel) error { + topicName := topicName(channel) + r.logger.Info("provisioning topic on kafka cluster", zap.String("topic", topicName)) + + partitions := DefaultNumPartitions + + if channel.Spec.Arguments != nil { + var err error + arguments, err := unmarshalArguments(channel.Spec.Arguments.Raw) + if err != nil { + return err + } + if num, ok := arguments[ArgumentNumPartitions]; ok { + parsedNum, ok := num.(float64) + if !ok { + return fmt.Errorf("could not parse argument %s for channel %s", ArgumentNumPartitions, fmt.Sprintf("%s/%s", channel.Namespace, channel.Name)) + } + partitions = int(parsedNum) + } + } + + err := r.kafkaClusterAdmin.CreateTopic(topicName, &sarama.TopicDetail{ + ReplicationFactor: 1, + NumPartitions: int32(partitions), + }, false) + if err == sarama.ErrTopicAlreadyExists { + return nil + } else if err != nil { + r.logger.Error("error creating topic", zap.String("topic", topicName), zap.Error(err)) + } else { + r.logger.Info("successfully created topic", zap.String("topic", topicName)) + } + return err +} + func (r *reconciler) getClusterProvisioner() (*v1alpha1.ClusterProvisioner, error) { clusterProvisioner := &v1alpha1.ClusterProvisioner{} objKey := client.ObjectKey{ @@ -139,3 +184,18 @@ func (r *reconciler) updateStatus(channel *v1alpha1.Channel) (*v1alpha1.Channel, } return newChannel, nil } + +func topicName(channel *v1alpha1.Channel) string { + return fmt.Sprintf("%s.%s", channel.Namespace, channel.Name) +} + +// unmarshalArguments 
unmarshal's a json/yaml serialized input and returns a map structure +func unmarshalArguments(bytes []byte) (map[string]interface{}, error) { + arguments := make(map[string]interface{}) + if len(bytes) > 0 { + if err := yaml.Unmarshal(bytes, &arguments); err != nil { + return nil, err + } + } + return arguments, nil +} diff --git a/pkg/provisioners/kafka/controller/channel/reconcile_test.go b/pkg/provisioners/kafka/controller/channel/reconcile_test.go index d722615e999..c3d5eb118ce 100644 --- a/pkg/provisioners/kafka/controller/channel/reconcile_test.go +++ b/pkg/provisioners/kafka/controller/channel/reconcile_test.go @@ -21,6 +21,9 @@ import ( "fmt" "testing" + "github.com/Shopify/sarama" + "github.com/ghodss/yaml" + "github.com/google/go-cmp/cmp" duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -60,9 +63,56 @@ var mockFetchError = controllertesting.Mocks{ }, } +type mockClusterAdmin struct { + mockCreateTopicFunc func(topic string, detail *sarama.TopicDetail, validateOnly bool) error +} + +func (ca *mockClusterAdmin) CreateTopic(topic string, detail *sarama.TopicDetail, validateOnly bool) error { + if ca.mockCreateTopicFunc != nil { + return ca.mockCreateTopicFunc(topic, detail, validateOnly) + } + return nil +} + +func (ca *mockClusterAdmin) Close() error { + return nil +} + +func (ca *mockClusterAdmin) DeleteTopic(topic string) error { + return nil +} + +func (ca *mockClusterAdmin) CreatePartitions(topic string, count int32, assignment [][]int32, validateOnly bool) error { + return nil +} + +func (ca *mockClusterAdmin) DeleteRecords(topic string, partitionOffsets map[int32]int64) error { + return nil +} + +func (ca *mockClusterAdmin) DescribeConfig(resource sarama.ConfigResource) ([]sarama.ConfigEntry, error) { + return nil, nil +} + +func (ca *mockClusterAdmin) AlterConfig(resourceType sarama.ConfigResourceType, name string, entries map[string]*string, validateOnly bool) error 
{ + return nil +} + +func (ca *mockClusterAdmin) CreateACL(resource sarama.Resource, acl sarama.Acl) error { + return nil +} + +func (ca *mockClusterAdmin) ListAcls(filter sarama.AclFilter) ([]sarama.ResourceAcls, error) { + return nil, nil +} + +func (ca *mockClusterAdmin) DeleteACL(filter sarama.AclFilter, validateOnly bool) ([]sarama.MatchingAcl, error) { + return nil, nil +} + var testCases = []controllertesting.TestCase{ { - Name: "new channel with valid provisioner: adds not provisioned status", + Name: "new channel with valid provisioner: adds provisioned status", InitialState: []runtime.Object{ getNewClusterProvisioner(clusterProvisionerName, true), getNewChannel(channelName, clusterProvisionerName), @@ -70,7 +120,7 @@ var testCases = []controllertesting.TestCase{ ReconcileKey: fmt.Sprintf("%s/%s", testNS, channelName), WantResult: reconcile.Result{}, WantPresent: []runtime.Object{ - getNewChannelNotProvisionedStatus(channelName, clusterProvisionerName, "NotImplemented"), + getNewChannelProvisionedStatus(channelName, clusterProvisionerName), }, IgnoreTimes: true, }, @@ -156,16 +206,118 @@ func TestAllCases(t *testing.T) { c := tc.GetClient() logger := provisioners.NewProvisionerLoggerFromConfig(provisioners.NewLoggingConfig()) r := &reconciler{ - client: c, - recorder: recorder, - logger: logger.Desugar(), - config: getControllerConfig(), + client: c, + recorder: recorder, + logger: logger.Desugar(), + config: getControllerConfig(), + kafkaClusterAdmin: &mockClusterAdmin{}, } t.Logf("Running test %s", tc.Name) t.Run(tc.Name, tc.Runner(t, r, c)) } } +func TestProvisionChannel(t *testing.T) { + provisionTestCases := []struct { + name string + c *eventingv1alpha1.Channel + wantTopicName string + wantTopicDetail *sarama.TopicDetail + mockError error + wantError string + }{ + { + name: "no channel arguments - uses default", + c: getNewChannel(channelName, clusterProvisionerName), + wantTopicName: fmt.Sprintf("%s.%s", testNS, channelName), + wantTopicDetail: 
&sarama.TopicDetail{ + ReplicationFactor: 1, + NumPartitions: 1, + }, + }, + { + name: "with unknown channel arguments - uses default", + c: getNewChannelWithArgs(channelName, map[string]interface{}{"testing": "testing"}), + wantTopicName: fmt.Sprintf("%s.%s", testNS, channelName), + wantTopicDetail: &sarama.TopicDetail{ + ReplicationFactor: 1, + NumPartitions: 1, + }, + }, + { + name: "with invalid channel arguments - errors", + c: getNewChannelWithArgs(channelName, map[string]interface{}{ArgumentNumPartitions: "invalid"}), + wantError: fmt.Sprintf("could not parse argument %s for channel test-namespace/test-channel", ArgumentNumPartitions), + }, + { + name: "with unmarshallable channel arguments - errors", + c: func() *eventingv1alpha1.Channel { + channel := getNewChannel(channelName, clusterProvisionerName) + channel.Spec.Arguments = &runtime.RawExtension{ + Raw: []byte("invalid"), + } + return channel + }(), + wantError: "error unmarshaling JSON: json: cannot unmarshal string into Go value of type map[string]interface {}", + }, + { + name: "with valid channel arguments", + c: getNewChannelWithArgs(channelName, map[string]interface{}{ArgumentNumPartitions: 2}), + wantTopicName: fmt.Sprintf("%s.%s", testNS, channelName), + wantTopicDetail: &sarama.TopicDetail{ + ReplicationFactor: 1, + NumPartitions: 2, + }, + }, + { + name: "topic already exists - no error", + c: getNewChannelWithArgs(channelName, map[string]interface{}{ArgumentNumPartitions: 2}), + wantTopicName: fmt.Sprintf("%s.%s", testNS, channelName), + wantTopicDetail: &sarama.TopicDetail{ + ReplicationFactor: 1, + NumPartitions: 2, + }, + mockError: sarama.ErrTopicAlreadyExists, + }, + { + name: "error creating topic", + c: getNewChannelWithArgs(channelName, map[string]interface{}{ArgumentNumPartitions: 2}), + wantTopicName: fmt.Sprintf("%s.%s", testNS, channelName), + wantTopicDetail: &sarama.TopicDetail{ + ReplicationFactor: 1, + NumPartitions: 2, + }, + mockError: fmt.Errorf("unknown sarama error"), + 
wantError: "unknown sarama error", + }} + + for _, tc := range provisionTestCases { + t.Logf("running test %s", tc.name) + logger := provisioners.NewProvisionerLoggerFromConfig(provisioners.NewLoggingConfig()) + r := &reconciler{ + logger: logger.Desugar(), + kafkaClusterAdmin: &mockClusterAdmin{mockCreateTopicFunc: func(topic string, detail *sarama.TopicDetail, validateOnly bool) error { + if topic != tc.wantTopicName { + t.Errorf("expected topic name: %+v got: %+v", tc.wantTopicName, topic) + } + if diff := cmp.Diff(tc.wantTopicDetail, detail); diff != "" { + t.Errorf("unexpected detail (-want, +got) = %v", diff) + } + return tc.mockError + }}, + } + err := r.provisionChannel(tc.c) + var got string + if err != nil { + got = err.Error() + } + if diff := cmp.Diff(tc.wantError, got); diff != "" { + t.Errorf("unexpected error (-want, +got) = %v", diff) + } + } + +} + func getNewChannelNoProvisioner(name string) *eventingv1alpha1.Channel { channel := &eventingv1alpha1.Channel{ TypeMeta: channelType(), @@ -196,23 +348,26 @@ func getNewChannel(name, provisioner string) *eventingv1alpha1.Channel { return channel } +func getNewChannelWithArgs(name string, args map[string]interface{}) *eventingv1alpha1.Channel { + c := getNewChannelNoProvisioner(name) + bytes, _ := yaml.Marshal(args) + c.Spec.Arguments = &runtime.RawExtension{ + Raw: bytes, + } + return c +} + +func getNewChannelProvisionedStatus(name, provisioner string) *eventingv1alpha1.Channel { + c := getNewChannel(name, provisioner) + c.Status.InitializeConditions() + c.Status.MarkProvisioned() + return c +} + func getNewChannelNotProvisionedStatus(name, provisioner, msg string) *eventingv1alpha1.Channel { c := getNewChannel(name, provisioner) - c.Status = eventingv1alpha1.ChannelStatus{ - Conditions: []duckv1alpha1.Condition{ - { - Type: eventingv1alpha1.ChannelConditionProvisioned, - Status: corev1.ConditionFalse, - Reason: "NotProvisioned", - Message: msg}, - { - Type: eventingv1alpha1.ChannelConditionReady, - 
Status: corev1.ConditionFalse, - Reason: "NotProvisioned", - Message: msg, - }, - }, - } + c.Status.InitializeConditions() + c.Status.MarkNotProvisioned("NotProvisioned", msg) return c } From bc5515d706c02dd8ddead33a2ff9f7a36988717c Mon Sep 17 00:00:00 2001 From: Sabari Kumar Murugesan Date: Tue, 16 Oct 2018 14:58:09 -0700 Subject: [PATCH 10/20] Deprovision Channel --- .../kafka/controller/channel/provider.go | 1 - .../kafka/controller/channel/reconcile.go | 92 ++++++++++----- .../controller/channel/reconcile_test.go | 106 +++++++++++++++--- 3 files changed, 153 insertions(+), 46 deletions(-) diff --git a/pkg/provisioners/kafka/controller/channel/provider.go b/pkg/provisioners/kafka/controller/channel/provider.go index 3aead83c082..d5ab88be810 100644 --- a/pkg/provisioners/kafka/controller/channel/provider.go +++ b/pkg/provisioners/kafka/controller/channel/provider.go @@ -81,7 +81,6 @@ func getKafkaAdminClient(config *common.KafkaProvisionerConfig) (sarama.ClusterA saramaConf := sarama.NewConfig() saramaConf.Version = sarama.V1_1_0_0 saramaConf.ClientID = controllerAgentName - return sarama.NewClusterAdmin(config.Brokers, saramaConf) } diff --git a/pkg/provisioners/kafka/controller/channel/reconcile.go b/pkg/provisioners/kafka/controller/channel/reconcile.go index c11ea8441c9..7dc206cc91d 100644 --- a/pkg/provisioners/kafka/controller/channel/reconcile.go +++ b/pkg/provisioners/kafka/controller/channel/reconcile.go @@ -22,17 +22,20 @@ import ( "github.com/Shopify/sarama" "github.com/ghodss/yaml" + "go.uber.org/zap" "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/util/sets" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" - "go.uber.org/zap" ) const ( + finalizerName = controllerAgentName + ArgumentNumPartitions = "NumPartitions" DefaultNumPartitions = 1 ) @@ -41,6 +44,7 @@ const 
( // converge the two. It then updates the Status block of the Channel resource // with the current status of the resource. func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, error) { + ctx := context.TODO() r.logger.Info("Reconciling channel", zap.Any("request", request)) channel := &v1alpha1.Channel{} err := r.client.Get(context.TODO(), request.NamespacedName, channel) @@ -71,26 +75,20 @@ func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, err return reconcile.Result{}, nil } - original := channel.DeepCopy() + newChannel := channel.DeepCopy() if clusterProvisioner.Status.IsReady() { // Reconcile this copy of the Channel and then write back any status // updates regardless of whether the reconcile error out. - err = r.reconcile(channel) + err = r.reconcile(newChannel) } else { - channel.Status.MarkNotProvisioned("NotProvisioned", "ClusterProvisioner %s is not ready", clusterProvisioner.Name) + newChannel.Status.MarkNotProvisioned("NotProvisioned", "ClusterProvisioner %s is not ready", clusterProvisioner.Name) err = fmt.Errorf("ClusterProvisioner %s is not ready", clusterProvisioner.Name) } - if !equality.Semantic.DeepEqual(original.Status, channel.Status) { - // If we didn't change anything then don't call updateStatus. - // This is important because the copy we loaded from the informer's - // cache may be stale and we don't want to overwrite a prior update - // to status with this stale state. 
- if _, err := r.updateStatus(channel); err != nil { - r.logger.Info("failed to update channel status", zap.Error(err)) - return reconcile.Result{}, err - } + if err := r.updateChannel(ctx, newChannel); err != nil { + r.logger.Info("failed to update channel status", zap.Error(err)) + return reconcile.Result{}, err } // Requeue if the resource is not ready: @@ -107,10 +105,14 @@ func (r *reconciler) reconcile(channel *v1alpha1.Channel) error { deletionTimestamp := accessor.GetDeletionTimestamp() if deletionTimestamp != nil { r.logger.Info(fmt.Sprintf("DeletionTimestamp: %v", deletionTimestamp)) - //TODO: Handle deletion + if err := r.deprovisionChannel(channel); err != nil { + return err + } + r.removeFinalizer(channel) return nil } + r.addFinalizer(channel) channel.Status.InitializeConditions() if err := r.provisionChannel(channel); err != nil { channel.Status.MarkNotProvisioned("NotProvisioned", "error while provisioning: %s", err) @@ -122,7 +124,7 @@ func (r *reconciler) reconcile(channel *v1alpha1.Channel) error { func (r *reconciler) provisionChannel(channel *v1alpha1.Channel) error { topicName := topicName(channel) - r.logger.Info("provisioning topic on kafka cluster", zap.String("topic", topicName)) + r.logger.Info("creating topic on kafka cluster", zap.String("topic", topicName)) partitions := DefaultNumPartitions @@ -155,34 +157,66 @@ func (r *reconciler) provisionChannel(channel *v1alpha1.Channel) error { return err } +func (r *reconciler) deprovisionChannel(channel *v1alpha1.Channel) error { + topicName := topicName(channel) + r.logger.Info("deleting topic on kafka cluster", zap.String("topic", topicName)) + + err := r.kafkaClusterAdmin.DeleteTopic(topicName) + if err == sarama.ErrUnknownTopicOrPartition { + return nil + } else if err != nil { + r.logger.Error("error deleting topic", zap.String("topic", topicName), zap.Error(err)) + } else { + r.logger.Info("successfully deleted topic %s", zap.String("topic", topicName)) + } + return err +} + func (r 
*reconciler) getClusterProvisioner() (*v1alpha1.ClusterProvisioner, error) { clusterProvisioner := &v1alpha1.ClusterProvisioner{} objKey := client.ObjectKey{ Name: r.config.Name, } - if err := r.client.Get(context.TODO(), objKey, clusterProvisioner); err != nil { + if err := r.client.Get(context.Background(), objKey, clusterProvisioner); err != nil { return nil, err } return clusterProvisioner, nil } -func (r *reconciler) updateStatus(channel *v1alpha1.Channel) (*v1alpha1.Channel, error) { - newChannel := &v1alpha1.Channel{} - err := r.client.Get(context.TODO(), client.ObjectKey{Namespace: channel.Namespace, Name: channel.Name}, newChannel) - +func (r *reconciler) updateChannel(ctx context.Context, u *v1alpha1.Channel) error { + channel := &v1alpha1.Channel{} + err := r.client.Get(ctx, client.ObjectKey{Namespace: u.Namespace, Name: u.Name}, channel) if err != nil { - return nil, err + return err } - newChannel.Status = channel.Status - // Until #38113 is merged, we must use Update instead of UpdateStatus to - // update the Status block of the Channel resource. UpdateStatus will not - // allow changes to the Spec of the resource, which is ideal for ensuring - // nothing other than resource status has been updated. - if err = r.client.Update(context.TODO(), newChannel); err != nil { - return nil, err + updated := false + if !equality.Semantic.DeepEqual(channel.Finalizers, u.Finalizers) { + channel.SetFinalizers(u.ObjectMeta.Finalizers) + updated = true + } + + if !equality.Semantic.DeepEqual(channel.Status, u.Status) { + channel.Status = u.Status + updated = true } - return newChannel, nil + + if updated == false { + return nil + } + return r.client.Update(ctx, channel) +} + +func (r *reconciler) addFinalizer(channel *v1alpha1.Channel) { + finalizers := sets.NewString(channel.Finalizers...) 
+ finalizers.Insert(finalizerName) + channel.Finalizers = finalizers.List() +} + +func (r *reconciler) removeFinalizer(channel *v1alpha1.Channel) { + finalizers := sets.NewString(channel.Finalizers...) + finalizers.Delete(finalizerName) + channel.Finalizers = finalizers.List() } func topicName(channel *v1alpha1.Channel) string { diff --git a/pkg/provisioners/kafka/controller/channel/reconcile_test.go b/pkg/provisioners/kafka/controller/channel/reconcile_test.go index c3d5eb118ce..e7d836f161b 100644 --- a/pkg/provisioners/kafka/controller/channel/reconcile_test.go +++ b/pkg/provisioners/kafka/controller/channel/reconcile_test.go @@ -46,6 +46,10 @@ const ( testNS = "test-namespace" ) +var ( + deletedTs = metav1.Now().Rfc3339Copy() +) + func init() { // Add types to scheme eventingv1alpha1.AddToScheme(scheme.Scheme) @@ -65,6 +69,7 @@ var mockFetchError = controllertesting.Mocks{ type mockClusterAdmin struct { mockCreateTopicFunc func(topic string, detail *sarama.TopicDetail, validateOnly bool) error + mockDeleteTopicFunc func(topic string) error } func (ca *mockClusterAdmin) CreateTopic(topic string, detail *sarama.TopicDetail, validateOnly bool) error { @@ -79,6 +84,9 @@ func (ca *mockClusterAdmin) Close() error { } func (ca *mockClusterAdmin) DeleteTopic(topic string) error { + if ca.mockDeleteTopicFunc != nil { + return ca.mockDeleteTopicFunc(topic) + } return nil } @@ -197,6 +205,17 @@ var testCases = []controllertesting.TestCase{ getNewChannel(channelName, clusterProvisionerName), }, }, + { + Name: "deleted channel", + InitialState: []runtime.Object{ + getNewClusterProvisioner(clusterProvisionerName, true), + getNewChannelDeleted(channelName, clusterProvisionerName), + }, + ReconcileKey: fmt.Sprintf("%s/%s", testNS, channelName), + WantResult: reconcile.Result{}, + WantPresent: []runtime.Object{}, + IgnoreTimes: true, + }, } func TestAllCases(t *testing.T) { @@ -227,7 +246,7 @@ func TestProvisionChannel(t *testing.T) { wantError string }{ { - name: "no channel 
arguments - uses default", + name: "provision with no channel arguments - uses default", c: getNewChannel(channelName, clusterProvisionerName), wantTopicName: fmt.Sprintf("%s.%s", testNS, channelName), wantTopicDetail: &sarama.TopicDetail{ @@ -236,7 +255,7 @@ func TestProvisionChannel(t *testing.T) { }, }, { - name: "with unknown channel arguments - uses default", + name: "provision with unknown channel arguments - uses default", c: getNewChannelWithArgs(channelName, map[string]interface{}{"testing": "testing"}), wantTopicName: fmt.Sprintf("%s.%s", testNS, channelName), wantTopicDetail: &sarama.TopicDetail{ @@ -245,12 +264,12 @@ func TestProvisionChannel(t *testing.T) { }, }, { - name: "with invalid channel arguments - errors", + name: "provision with invalid channel arguments - errors", c: getNewChannelWithArgs(channelName, map[string]interface{}{ArgumentNumPartitions: "invalid"}), wantError: fmt.Sprintf("could not parse argument %s for channel test-namespace/test-channel", ArgumentNumPartitions), }, { - name: "with unmarshallable channel arguments - errors", + name: "provision with unmarshallable channel arguments - errors", c: func() *eventingv1alpha1.Channel { channel := getNewChannel(channelName, clusterProvisionerName) channel.Spec.Arguments = &runtime.RawExtension{ @@ -261,7 +280,7 @@ func TestProvisionChannel(t *testing.T) { wantError: "error unmarshaling JSON: json: cannot unmarshal string into Go value of type map[string]interface {}", }, { - name: "with valid channel arguments", + name: "provision with valid channel arguments", c: getNewChannelWithArgs(channelName, map[string]interface{}{ArgumentNumPartitions: 2}), wantTopicName: fmt.Sprintf("%s.%s", testNS, channelName), wantTopicDetail: &sarama.TopicDetail{ @@ -270,7 +289,7 @@ func TestProvisionChannel(t *testing.T) { }, }, { - name: "topic already exists - no error", + name: "provision but topic already exists - no error", c: getNewChannelWithArgs(channelName, 
map[string]interface{}{ArgumentNumPartitions: 2}), wantTopicName: fmt.Sprintf("%s.%s", testNS, channelName), wantTopicDetail: &sarama.TopicDetail{ @@ -280,7 +299,7 @@ func TestProvisionChannel(t *testing.T) { mockError: sarama.ErrTopicAlreadyExists, }, { - name: "error creating topic", + name: "provision but error creating topic", c: getNewChannelWithArgs(channelName, map[string]interface{}{ArgumentNumPartitions: 2}), wantTopicName: fmt.Sprintf("%s.%s", testNS, channelName), wantTopicDetail: &sarama.TopicDetail{ @@ -296,15 +315,13 @@ func TestProvisionChannel(t *testing.T) { logger := provisioners.NewProvisionerLoggerFromConfig(provisioners.NewLoggingConfig()) r := &reconciler{ logger: logger.Desugar(), - kafkaClusterAdmin: &mockClusterAdmin{mockCreateTopicFunc: func(topic string, detail *sarama.TopicDetail, validateOnly bool) error { - if topic != tc.wantTopicName { - t.Errorf("expected topic name: %+v got: %+v", tc.wantTopicName, topic) - } - if diff := cmp.Diff(tc.wantTopicDetail, detail); diff != "" { - t.Errorf("unexpected detail (-want, +got) = %v", diff) - } - return tc.mockError - }}, + kafkaClusterAdmin: &mockClusterAdmin{ + mockCreateTopicFunc: func(topic string, detail *sarama.TopicDetail, validateOnly bool) error { + if topic != tc.wantTopicName { + t.Errorf("expected topic name: %+v got: %+v", tc.wantTopicName, topic) + } + return tc.mockError + }}, } err := r.provisionChannel(tc.c) var got string @@ -315,7 +332,57 @@ func TestProvisionChannel(t *testing.T) { t.Errorf("unexpected error (-want, +got) = %v", diff) } } +} +func TestDeprovisionChannel(t *testing.T) { + deprovisionTestCases := []struct { + name string + c *eventingv1alpha1.Channel + wantTopicName string + mockError error + wantError string + }{ + { + name: "deprovision channel - unknown error", + c: getNewChannel(channelName, clusterProvisionerName), + wantTopicName: fmt.Sprintf("%s.%s", testNS, channelName), + mockError: fmt.Errorf("unknown sarama error"), + wantError: "unknown sarama 
error", + }, + { + name: "deprovision channel - topic already deleted", + c: getNewChannel(channelName, clusterProvisionerName), + wantTopicName: fmt.Sprintf("%s.%s", testNS, channelName), + mockError: sarama.ErrUnknownTopicOrPartition, + }, + { + name: "deprovision channel - success", + c: getNewChannel(channelName, clusterProvisionerName), + wantTopicName: fmt.Sprintf("%s.%s", testNS, channelName), + }} + + for _, tc := range deprovisionTestCases { + t.Logf("running test %s", tc.name) + logger := provisioners.NewProvisionerLoggerFromConfig(provisioners.NewLoggingConfig()) + r := &reconciler{ + logger: logger.Desugar(), + kafkaClusterAdmin: &mockClusterAdmin{ + mockDeleteTopicFunc: func(topic string) error { + if topic != tc.wantTopicName { + t.Errorf("expected topic name: %+v got: %+v", tc.wantTopicName, topic) + } + return tc.mockError + }}, + } + err := r.deprovisionChannel(tc.c) + var got string + if err != nil { + got = err.Error() + } + if diff := cmp.Diff(tc.wantError, got); diff != "" { + t.Errorf("unexpected error (-want, +got) = %v", diff) + } + } } func getNewChannelNoProvisioner(name string) *eventingv1alpha1.Channel { @@ -361,6 +428,13 @@ func getNewChannelProvisionedStatus(name, provisioner string) *eventingv1alpha1. 
c := getNewChannel(name, provisioner) c.Status.InitializeConditions() c.Status.MarkProvisioned() + c.Finalizers = []string{finalizerName} + return c +} + +func getNewChannelDeleted(name, provisioner string) *eventingv1alpha1.Channel { + c := getNewChannelProvisionedStatus(name, provisioner) + c.DeletionTimestamp = &deletedTs return c } From 86dd3f8eb15f70c41d39eba178581cfe7e14755d Mon Sep 17 00:00:00 2001 From: Matthias Wessendorf Date: Mon, 29 Oct 2018 16:08:34 -0700 Subject: [PATCH 11/20] Merge matzew/try_kafka_provisioner into try_kafka_provisioner --- .../provisioners/kafka/kafka-provisioner.yaml | 34 +++--- .../inmemory/channel/reconcile_test.go | 18 ++-- .../clusterchannelprovisioner/reconcile.go | 6 +- .../reconcile_test.go | 14 ++- .../kafka/controller/channel/provider.go | 5 +- .../kafka/controller/channel/reconcile.go | 24 ++--- .../controller/channel/reconcile_test.go | 101 ++++++++++-------- pkg/provisioners/kafka/controller/provider.go | 7 +- .../kafka/controller/reconcile.go | 21 ++-- .../kafka/controller/reconcile_test.go | 87 ++++++++------- pkg/provisioners/kafka/main.go | 21 ++-- 11 files changed, 176 insertions(+), 162 deletions(-) diff --git a/config/provisioners/kafka/kafka-provisioner.yaml b/config/provisioners/kafka/kafka-provisioner.yaml index 89b95c41c9e..3ee163bba96 100644 --- a/config/provisioners/kafka/kafka-provisioner.yaml +++ b/config/provisioners/kafka/kafka-provisioner.yaml @@ -13,50 +13,50 @@ # limitations under the License. 
apiVersion: eventing.knative.dev/v1alpha1 -kind: ClusterProvisioner +kind: ClusterChannelProvisioner metadata: - name: kafka + name: kafka-channel spec: reconciles: - group: eventing.knative.dev + group: eventing.knative.dev/v1alpha1 kind: Channel --- apiVersion: v1 kind: ServiceAccount metadata: - name: kafka-provisioner + name: kafka-channel-controller namespace: knative-eventing --- kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: - name: kafka-provisioner + name: kafka-channel-controller rules: - apiGroups: ["eventing.knative.dev"] - resources: ["clusterprovisioners", "channels"] + resources: ["clusterchannelprovisioners", "channels"] verbs: ["get", "watch", "list", "update"] --- apiVersion: rbac.authorization.k8s.io/v1beta1 kind: ClusterRoleBinding metadata: - name: kafka-provisioner-manage + name: kafka-channel-controller-manage subjects: - kind: ServiceAccount - name: kafka-provisioner + name: kafka-channel-controller namespace: knative-eventing roleRef: kind: ClusterRole - name: kafka-provisioner + name: kafka-channel-controller apiGroup: rbac.authorization.k8s.io --- apiVersion: v1 kind: ConfigMap metadata: - name: kafka-provisioner-config + name: kafka-channel-controller-config namespace: knative-eventing data: # Name of the provisioner that this controller represents @@ -68,23 +68,23 @@ data: apiVersion: apps/v1beta1 kind: Deployment metadata: - name: kafka-provisioner + name: kafka-channel-controller namespace: knative-eventing spec: replicas: 1 template: metadata: labels: - app: kafka-provisioner + app: kafka-channel-controller spec: - serviceAccountName: kafka-provisioner + serviceAccountName: kafka-channel-controller containers: - - name: kafka-provisioner-controller + - name: kafka-channel-controller-controller image: github.com/knative/eventing/pkg/provisioners/kafka volumeMounts: - - name: kafka-provisioner-config + - name: kafka-channel-controller-config mountPath: /etc/config-provisioner volumes: - - name: 
kafka-provisioner-config + - name: kafka-channel-controller-config configMap: - name: kafka-provisioner-config + name: kafka-channel-controller-config diff --git a/pkg/controller/eventing/inmemory/channel/reconcile_test.go b/pkg/controller/eventing/inmemory/channel/reconcile_test.go index f3a3bda8df0..bde7b039744 100644 --- a/pkg/controller/eventing/inmemory/channel/reconcile_test.go +++ b/pkg/controller/eventing/inmemory/channel/reconcile_test.go @@ -111,8 +111,10 @@ var ( Kind: "Channel", }, Spec: eventingv1alpha1.ChannelSpec{ - Provisioner: &corev1.ObjectReference{ - Name: ccpName, + Provisioner: &eventingv1alpha1.ProvisionerReference{ + Ref: &corev1.ObjectReference{ + Name: ccpName, + }, }, Channelable: &eventingduck.Channelable{ Subscribers: []eventingduck.ChannelSubscriberSpec{ @@ -160,8 +162,10 @@ var ( Kind: "Channel", }, Spec: eventingv1alpha1.ChannelSpec{ - Provisioner: &corev1.ObjectReference{ - Name: ccpName, + Provisioner: &eventingv1alpha1.ProvisionerReference{ + Ref: &corev1.ObjectReference{ + Name: ccpName, + }, }, Channelable: &eventingduck.Channelable{ Subscribers: []eventingduck.ChannelSubscriberSpec{ @@ -466,8 +470,10 @@ func makeChannel() *eventingv1alpha1.Channel { UID: cUID, }, Spec: eventingv1alpha1.ChannelSpec{ - Provisioner: &corev1.ObjectReference{ - Name: ccpName, + Provisioner: &eventingv1alpha1.ProvisionerReference{ + Ref: &corev1.ObjectReference{ + Name: ccpName, + }, }, }, } diff --git a/pkg/controller/eventing/inmemory/clusterchannelprovisioner/reconcile.go b/pkg/controller/eventing/inmemory/clusterchannelprovisioner/reconcile.go index 10a8dac8600..2fc32debef5 100644 --- a/pkg/controller/eventing/inmemory/clusterchannelprovisioner/reconcile.go +++ b/pkg/controller/eventing/inmemory/clusterchannelprovisioner/reconcile.go @@ -105,9 +105,9 @@ func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, err // IsControlled determines if the in-memory Channel Controller should control (and therefore // reconcile) a given 
object, based on that object's ClusterChannelProvisioner reference. -func IsControlled(ref *corev1.ObjectReference) bool { - if ref != nil { - return shouldReconcile(ref.Namespace, ref.Name) +func IsControlled(ref *eventingv1alpha1.ProvisionerReference) bool { + if ref != nil && ref.Ref != nil { + return shouldReconcile(ref.Ref.Namespace, ref.Ref.Name) } return false } diff --git a/pkg/controller/eventing/inmemory/clusterchannelprovisioner/reconcile_test.go b/pkg/controller/eventing/inmemory/clusterchannelprovisioner/reconcile_test.go index b4f9945cc2f..099e85c6a90 100644 --- a/pkg/controller/eventing/inmemory/clusterchannelprovisioner/reconcile_test.go +++ b/pkg/controller/eventing/inmemory/clusterchannelprovisioner/reconcile_test.go @@ -75,13 +75,19 @@ func TestInjectClient(t *testing.T) { func TestIsControlled(t *testing.T) { testCases := map[string]struct { - ref *corev1.ObjectReference + ref *eventingv1alpha1.ProvisionerReference isControlled bool }{ "nil": { ref: nil, isControlled: false, }, + "ref nil": { + ref: &eventingv1alpha1.ProvisionerReference{ + Ref: nil, + }, + isControlled: false, + }, "wrong namespace": { ref: &corev1.ObjectReference{ Namespace: "other", @@ -90,8 +96,10 @@ func TestIsControlled(t *testing.T) { isControlled: false, }, "wrong name": { - ref: &corev1.ObjectReference{ - Name: "other-name", + ref: &eventingv1alpha1.ProvisionerReference{ + Ref: &corev1.ObjectReference{ + Name: "other-name", + }, }, isControlled: false, }, diff --git a/pkg/provisioners/kafka/controller/channel/provider.go b/pkg/provisioners/kafka/controller/channel/provider.go index d5ab88be810..07e6ea4ca12 100644 --- a/pkg/provisioners/kafka/controller/channel/provider.go +++ b/pkg/provisioners/kafka/controller/channel/provider.go @@ -20,6 +20,8 @@ import ( "fmt" "github.com/Shopify/sarama" + "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" + common "github.com/knative/eventing/pkg/provisioners/kafka/controller" "go.uber.org/zap" "k8s.io/client-go/tools/record" 
"sigs.k8s.io/controller-runtime/pkg/client" @@ -28,9 +30,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" - - "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" - common "github.com/knative/eventing/pkg/provisioners/kafka/controller" ) const ( diff --git a/pkg/provisioners/kafka/controller/channel/reconcile.go b/pkg/provisioners/kafka/controller/channel/reconcile.go index 6fd03b47474..8ddcdf9bec4 100644 --- a/pkg/provisioners/kafka/controller/channel/reconcile.go +++ b/pkg/provisioners/kafka/controller/channel/reconcile.go @@ -22,6 +22,7 @@ import ( "github.com/Shopify/sarama" "github.com/ghodss/yaml" + "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" "go.uber.org/zap" "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/errors" @@ -29,8 +30,6 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" - - "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" ) const ( @@ -66,26 +65,24 @@ func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, err // Skip channel not managed by this provisioner provisionerRef := channel.Spec.Provisioner.Ref - clusterProvisioner, err := r.getClusterProvisioner() + clusterChannelProvisioner, err := r.getClusterChannelProvisioner() if err != nil { return reconcile.Result{}, err } - if provisionerRef.Name != clusterProvisioner.Name || provisionerRef.Namespace != clusterProvisioner.Namespace { + if provisionerRef.Name != clusterChannelProvisioner.Name || provisionerRef.Namespace != clusterChannelProvisioner.Namespace { return reconcile.Result{}, nil } newChannel := channel.DeepCopy() - newChannel.Status.InitializeConditions() - - if clusterProvisioner.Status.IsReady() { + if clusterChannelProvisioner.Status.IsReady() { // Reconcile this copy of the Channel and then write back any status // updates regardless 
of whether the reconcile error out. err = r.reconcile(newChannel) } else { - newChannel.Status.MarkNotProvisioned("NotProvisioned", "ClusterProvisioner %s is not ready", clusterProvisioner.Name) - err = fmt.Errorf("ClusterProvisioner %s is not ready", clusterProvisioner.Name) + newChannel.Status.MarkNotProvisioned("NotProvisioned", "ClusterChannelProvisioner %s is not ready", clusterChannelProvisioner.Name) + err = fmt.Errorf("ClusterChannelProvisioner %s is not ready", clusterChannelProvisioner.Name) } if err := r.updateChannel(ctx, newChannel); err != nil { @@ -115,6 +112,7 @@ func (r *reconciler) reconcile(channel *v1alpha1.Channel) error { } r.addFinalizer(channel) + channel.Status.InitializeConditions() if err := r.provisionChannel(channel); err != nil { channel.Status.MarkNotProvisioned("NotProvisioned", "error while provisioning: %s", err) return err @@ -173,15 +171,15 @@ func (r *reconciler) deprovisionChannel(channel *v1alpha1.Channel) error { return err } -func (r *reconciler) getClusterProvisioner() (*v1alpha1.ClusterProvisioner, error) { - clusterProvisioner := &v1alpha1.ClusterProvisioner{} +func (r *reconciler) getClusterChannelProvisioner() (*v1alpha1.ClusterChannelProvisioner, error) { + clusterChannelProvisioner := &v1alpha1.ClusterChannelProvisioner{} objKey := client.ObjectKey{ Name: r.config.Name, } - if err := r.client.Get(context.Background(), objKey, clusterProvisioner); err != nil { + if err := r.client.Get(context.Background(), objKey, clusterChannelProvisioner); err != nil { return nil, err } - return clusterProvisioner, nil + return clusterChannelProvisioner, nil } func (r *reconciler) updateChannel(ctx context.Context, u *v1alpha1.Channel) error { diff --git a/pkg/provisioners/kafka/controller/channel/reconcile_test.go b/pkg/provisioners/kafka/controller/channel/reconcile_test.go index 7de86868df1..a304f747b68 100644 --- a/pkg/provisioners/kafka/controller/channel/reconcile_test.go +++ 
b/pkg/provisioners/kafka/controller/channel/reconcile_test.go @@ -24,6 +24,12 @@ import ( "github.com/Shopify/sarama" "github.com/ghodss/yaml" "github.com/google/go-cmp/cmp" + "github.com/knative/eventing/pkg/apis/eventing" + eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" + controllertesting "github.com/knative/eventing/pkg/controller/testing" + "github.com/knative/eventing/pkg/provisioners" + "github.com/knative/eventing/pkg/provisioners/kafka/controller" + duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -31,18 +37,12 @@ import ( "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" - - "github.com/knative/eventing/pkg/apis/eventing" - eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" - controllertesting "github.com/knative/eventing/pkg/controller/testing" - "github.com/knative/eventing/pkg/provisioners" - "github.com/knative/eventing/pkg/provisioners/kafka/controller" ) const ( - channelName = "test-channel" - clusterProvisionerName = "kafka" - testNS = "test-namespace" + channelName = "test-channel" + clusterChannelProvisionerName = "kafka" + testNS = "test-namespace" ) var ( @@ -121,47 +121,47 @@ var testCases = []controllertesting.TestCase{ { Name: "new channel with valid provisioner: adds provisioned status", InitialState: []runtime.Object{ - getNewClusterProvisioner(clusterProvisionerName, true), - getNewChannel(channelName, clusterProvisionerName), + getNewClusterChannelProvisioner(clusterChannelProvisionerName, true), + getNewChannel(channelName, clusterChannelProvisionerName), }, ReconcileKey: fmt.Sprintf("%s/%s", testNS, channelName), WantResult: reconcile.Result{}, WantPresent: []runtime.Object{ - getNewChannelProvisionedStatus(channelName, clusterProvisionerName), + getNewChannelProvisionedStatus(channelName, 
clusterChannelProvisionerName), }, IgnoreTimes: true, }, { Name: "new channel with provisioner not ready: error", InitialState: []runtime.Object{ - getNewClusterProvisioner(clusterProvisionerName, false), - getNewChannel(channelName, clusterProvisionerName), + getNewClusterChannelProvisioner(clusterChannelProvisionerName, false), + getNewChannel(channelName, clusterChannelProvisionerName), }, ReconcileKey: fmt.Sprintf("%s/%s", testNS, channelName), WantResult: reconcile.Result{}, - WantErrMsg: "ClusterProvisioner " + clusterProvisionerName + " is not ready", + WantErrMsg: "ClusterChannelProvisioner " + clusterChannelProvisionerName + " is not ready", WantPresent: []runtime.Object{ - getNewChannelNotProvisionedStatus(channelName, clusterProvisionerName, - "ClusterProvisioner "+clusterProvisionerName+" is not ready"), + getNewChannelNotProvisionedStatus(channelName, clusterChannelProvisionerName, + "ClusterChannelProvisioner "+clusterChannelProvisionerName+" is not ready"), }, IgnoreTimes: true, }, { Name: "new channel with missing provisioner: error", InitialState: []runtime.Object{ - getNewChannel(channelName, clusterProvisionerName), + getNewChannel(channelName, clusterChannelProvisionerName), }, ReconcileKey: fmt.Sprintf("%s/%s", testNS, channelName), WantResult: reconcile.Result{}, - WantErrMsg: "clusterprovisioners.eventing.knative.dev \"" + clusterProvisionerName + "\" not found", + WantErrMsg: "clusterChannelProvisioners.eventing.knative.dev \"" + clusterChannelProvisionerName + "\" not found", IgnoreTimes: true, }, { Name: "new channel with provisioner not managed by this controller: skips channel", InitialState: []runtime.Object{ getNewChannel(channelName, "not-our-provisioner"), - getNewClusterProvisioner("not-our-provisioner", true), - getNewClusterProvisioner(clusterProvisionerName, true), + getNewClusterChannelProvisioner("not-our-provisioner", true), + getNewClusterChannelProvisioner(clusterChannelProvisionerName, true), }, ReconcileKey: 
fmt.Sprintf("%s/%s", testNS, channelName), WantResult: reconcile.Result{}, @@ -193,23 +193,22 @@ var testCases = []controllertesting.TestCase{ { Name: "error fetching channel", InitialState: []runtime.Object{ - getNewClusterProvisioner(clusterProvisionerName, true), - getNewChannel(channelName, clusterProvisionerName), + getNewClusterChannelProvisioner(clusterChannelProvisionerName, true), + getNewChannel(channelName, clusterChannelProvisionerName), }, Mocks: mockFetchError, ReconcileKey: fmt.Sprintf("%s/%s", testNS, channelName), WantErrMsg: "error fetching", WantPresent: []runtime.Object{ - getNewClusterProvisioner(clusterProvisionerName, true), - getNewChannel(channelName, clusterProvisionerName), + getNewClusterChannelProvisioner(clusterChannelProvisionerName, true), + getNewChannel(channelName, clusterChannelProvisionerName), }, - IgnoreTimes: true, }, { Name: "deleted channel", InitialState: []runtime.Object{ - getNewClusterProvisioner(clusterProvisionerName, true), - getNewChannelDeleted(channelName, clusterProvisionerName), + getNewClusterChannelProvisioner(clusterChannelProvisionerName, true), + getNewChannelDeleted(channelName, clusterChannelProvisionerName), }, ReconcileKey: fmt.Sprintf("%s/%s", testNS, channelName), WantResult: reconcile.Result{}, @@ -247,7 +246,7 @@ func TestProvisionChannel(t *testing.T) { }{ { name: "provision with no channel arguments - uses default", - c: getNewChannel(channelName, clusterProvisionerName), + c: getNewChannel(channelName, clusterChannelProvisionerName), wantTopicName: fmt.Sprintf("%s.%s", testNS, channelName), wantTopicDetail: &sarama.TopicDetail{ ReplicationFactor: 1, @@ -271,7 +270,7 @@ func TestProvisionChannel(t *testing.T) { { name: "provision with unmarshallable channel arguments - errors", c: func() *eventingv1alpha1.Channel { - channel := getNewChannel(channelName, clusterProvisionerName) + channel := getNewChannel(channelName, clusterChannelProvisionerName) channel.Spec.Arguments = &runtime.RawExtension{ 
Raw: []byte("invalid"), } @@ -344,20 +343,20 @@ func TestDeprovisionChannel(t *testing.T) { }{ { name: "deprovision channel - unknown error", - c: getNewChannel(channelName, clusterProvisionerName), + c: getNewChannel(channelName, clusterChannelProvisionerName), wantTopicName: fmt.Sprintf("%s.%s", testNS, channelName), mockError: fmt.Errorf("unknown sarama error"), wantError: "unknown sarama error", }, { name: "deprovision channel - topic already deleted", - c: getNewChannel(channelName, clusterProvisionerName), + c: getNewChannel(channelName, clusterChannelProvisionerName), wantTopicName: fmt.Sprintf("%s.%s", testNS, channelName), mockError: sarama.ErrUnknownTopicOrPartition, }, { name: "deprovision channel - success", - c: getNewChannel(channelName, clusterProvisionerName), + c: getNewChannel(channelName, clusterChannelProvisionerName), wantTopicName: fmt.Sprintf("%s.%s", testNS, channelName), }} @@ -404,7 +403,7 @@ func getNewChannel(name, provisioner string) *eventingv1alpha1.Channel { Provisioner: &eventingv1alpha1.ProvisionerReference{ Ref: &corev1.ObjectReference{ Name: provisioner, - Kind: "ClusterProvisioner", + Kind: "ClusterChannelProvisioner", APIVersion: eventingv1alpha1.SchemeGroupVersion.String(), }, }, @@ -452,29 +451,37 @@ func channelType() metav1.TypeMeta { } } -func getNewClusterProvisioner(name string, isReady bool) *eventingv1alpha1.ClusterProvisioner { - clusterProvisioner := &eventingv1alpha1.ClusterProvisioner{ +func getNewClusterChannelProvisioner(name string, isReady bool) *eventingv1alpha1.ClusterChannelProvisioner { + var condStatus corev1.ConditionStatus + if isReady { + condStatus = corev1.ConditionTrue + } else { + condStatus = corev1.ConditionFalse + } + clusterChannelProvisioner := &eventingv1alpha1.ClusterChannelProvisioner{ TypeMeta: metav1.TypeMeta{ APIVersion: eventingv1alpha1.SchemeGroupVersion.String(), - Kind: "ClusterProvisioner", + Kind: "ClusterChannelProvisioner", }, ObjectMeta: om("", name), - Spec: 
eventingv1alpha1.ClusterProvisionerSpec{ + Spec: eventingv1alpha1.ClusterChannelProvisionerSpec{ Reconciles: metav1.GroupKind{ Kind: "Channel", Group: eventing.GroupName, }, }, - } - clusterProvisioner.Status.InitializeConditions() - if isReady { - clusterProvisioner.Status.MarkReady() - } else { - clusterProvisioner.Status.MarkNotReady("NotReady", "testing") + Status: eventingv1alpha1.ClusterChannelProvisionerStatus{ + Conditions: []duckv1alpha1.Condition{ + { + Type: eventingv1alpha1.ClusterChannelProvisionerConditionReady, + Status: condStatus, + }, + }, + }, } // selflink is not filled in when we create the object, so clear it - clusterProvisioner.ObjectMeta.SelfLink = "" - return clusterProvisioner + clusterChannelProvisioner.ObjectMeta.SelfLink = "" + return clusterChannelProvisioner } func om(namespace, name string) metav1.ObjectMeta { @@ -487,7 +494,7 @@ func om(namespace, name string) metav1.ObjectMeta { func getControllerConfig() *controller.KafkaProvisionerConfig { return &controller.KafkaProvisionerConfig{ - Name: clusterProvisionerName, + Name: clusterChannelProvisionerName, Brokers: []string{"test-broker"}, } } diff --git a/pkg/provisioners/kafka/controller/provider.go b/pkg/provisioners/kafka/controller/provider.go index 8cfb91eb68a..59c0ec94697 100644 --- a/pkg/provisioners/kafka/controller/provider.go +++ b/pkg/provisioners/kafka/controller/provider.go @@ -17,6 +17,7 @@ limitations under the License. 
package controller import ( + "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" "go.uber.org/zap" "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" @@ -25,8 +26,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" - - "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" ) const ( @@ -59,8 +58,8 @@ func ProvideController(mgr manager.Manager, config *KafkaProvisionerConfig, logg return nil, err } - // Watch ClusterProvisioner events and enqueue ClusterProvisioner object key. - if err := c.Watch(&source.Kind{Type: &v1alpha1.ClusterProvisioner{}}, &handler.EnqueueRequestForObject{}); err != nil { + // Watch ClusterChannelProvisioner events and enqueue ClusterChannelProvisioner object key. + if err := c.Watch(&source.Kind{Type: &v1alpha1.ClusterChannelProvisioner{}}, &handler.EnqueueRequestForObject{}); err != nil { return nil, err } diff --git a/pkg/provisioners/kafka/controller/reconcile.go b/pkg/provisioners/kafka/controller/reconcile.go index 3fbe31a3280..cb0c89f215d 100644 --- a/pkg/provisioners/kafka/controller/reconcile.go +++ b/pkg/provisioners/kafka/controller/reconcile.go @@ -20,32 +20,31 @@ import ( "context" "fmt" + "github.com/knative/eventing/pkg/apis/eventing" + "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" + "go.uber.org/zap" "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" - - "github.com/knative/eventing/pkg/apis/eventing" - "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" - "go.uber.org/zap" ) // Reconcile compares the actual state with the desired, and attempts to // converge the two. It then updates the Status block of the Provisioner resource // with the current status of the resource. 
func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, error) { - r.logger.Info("reconciling ClusterProvisioner", zap.Any("request", request)) - provisioner := &v1alpha1.ClusterProvisioner{} + r.logger.Info("reconciling ClusterChannelProvisioner", zap.Any("request", request)) + provisioner := &v1alpha1.ClusterChannelProvisioner{} err := r.client.Get(context.TODO(), request.NamespacedName, provisioner) if errors.IsNotFound(err) { - r.logger.Info("could not find ClusterProvisioner", zap.Any("request", request)) + r.logger.Info("could not find ClusterChannelProvisioner", zap.Any("request", request)) return reconcile.Result{}, nil } if err != nil { - r.logger.Error("could not fetch ClusterProvisioner", zap.Error(err)) + r.logger.Error("could not fetch ClusterChannelProvisioner", zap.Error(err)) return reconcile.Result{}, err } @@ -79,7 +78,7 @@ func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, err return reconcile.Result{}, err } -func (r *reconciler) reconcile(provisioner *v1alpha1.ClusterProvisioner) error { +func (r *reconciler) reconcile(provisioner *v1alpha1.ClusterChannelProvisioner) error { // See if the provisioner has been deleted accessor, err := meta.Accessor(provisioner) if err != nil { @@ -99,8 +98,8 @@ func (r *reconciler) reconcile(provisioner *v1alpha1.ClusterProvisioner) error { return nil } -func (r *reconciler) updateStatus(provisioner *v1alpha1.ClusterProvisioner) (*v1alpha1.ClusterProvisioner, error) { - newProvisioner := &v1alpha1.ClusterProvisioner{} +func (r *reconciler) updateStatus(provisioner *v1alpha1.ClusterChannelProvisioner) (*v1alpha1.ClusterChannelProvisioner, error) { + newProvisioner := &v1alpha1.ClusterChannelProvisioner{} err := r.client.Get(context.TODO(), client.ObjectKey{Namespace: provisioner.Namespace, Name: provisioner.Name}, newProvisioner) if err != nil { diff --git a/pkg/provisioners/kafka/controller/reconcile_test.go b/pkg/provisioners/kafka/controller/reconcile_test.go 
index fda1839a6c1..c21d26706ef 100644 --- a/pkg/provisioners/kafka/controller/reconcile_test.go +++ b/pkg/provisioners/kafka/controller/reconcile_test.go @@ -21,6 +21,10 @@ import ( "fmt" "testing" + "github.com/knative/eventing/pkg/apis/eventing" + eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" + controllertesting "github.com/knative/eventing/pkg/controller/testing" + "github.com/knative/eventing/pkg/provisioners" duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -29,16 +33,11 @@ import ( "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" - - "github.com/knative/eventing/pkg/apis/eventing" - eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" - controllertesting "github.com/knative/eventing/pkg/controller/testing" - "github.com/knative/eventing/pkg/provisioners" ) const ( - clusterProvisionerName = "kafka" - testNS = "" + clusterChannelProvisionerName = "kafka" + testNS = "" ) func init() { @@ -46,15 +45,15 @@ func init() { eventingv1alpha1.AddToScheme(scheme.Scheme) } -var ClusterProvisionerConditionReady = duckv1alpha1.Condition{ - Type: eventingv1alpha1.ClusterProvisionerConditionReady, +var ClusterChannelProvisionerConditionReady = duckv1alpha1.Condition{ + Type: eventingv1alpha1.ClusterChannelProvisionerConditionReady, Status: corev1.ConditionTrue, } var mockFetchError = controllertesting.Mocks{ MockGets: []controllertesting.MockGet{ func(innerClient client.Client, ctx context.Context, key client.ObjectKey, obj runtime.Object) (controllertesting.MockHandled, error) { - if _, ok := obj.(*eventingv1alpha1.ClusterProvisioner); ok { + if _, ok := obj.(*eventingv1alpha1.ClusterChannelProvisioner); ok { err := fmt.Errorf("error fetching") return controllertesting.Handled, err } @@ -65,67 +64,67 @@ var mockFetchError = controllertesting.Mocks{ var testCases = 
[]controllertesting.TestCase{ { - Name: "new channel clusterprovisioner: adds status", + Name: "new channel clusterChannelProvisioner: adds status", InitialState: []runtime.Object{ - GetNewChannelClusterProvisioner(clusterProvisionerName), + GetNewChannelClusterChannelProvisioner(clusterChannelProvisionerName), }, - ReconcileKey: fmt.Sprintf("%s/%s", testNS, clusterProvisionerName), + ReconcileKey: fmt.Sprintf("%s/%s", testNS, clusterChannelProvisionerName), WantResult: reconcile.Result{}, WantPresent: []runtime.Object{ - GetNewChannelClusterProvisionerReady(clusterProvisionerName), + GetNewChannelClusterChannelProvisionerReady(clusterChannelProvisionerName), }, IgnoreTimes: true, }, { Name: "reconciles only channel kind", InitialState: []runtime.Object{ - getNewClusterProvisioner(clusterProvisionerName, "Source"), + getNewClusterChannelProvisioner(clusterChannelProvisionerName, "Source"), }, - ReconcileKey: fmt.Sprintf("%s/%s", testNS, clusterProvisionerName), + ReconcileKey: fmt.Sprintf("%s/%s", testNS, clusterChannelProvisionerName), WantResult: reconcile.Result{}, WantPresent: []runtime.Object{ - getNewClusterProvisioner(clusterProvisionerName, "Source"), + getNewClusterChannelProvisioner(clusterChannelProvisionerName, "Source"), }, }, { Name: "reconciles only associated provisioner", InitialState: []runtime.Object{ - GetNewChannelClusterProvisioner("not-default-provisioner"), + GetNewChannelClusterChannelProvisioner("not-default-provisioner"), }, ReconcileKey: fmt.Sprintf("%s/%s", testNS, "not-default-provisioner"), WantResult: reconcile.Result{}, WantPresent: []runtime.Object{ - GetNewChannelClusterProvisioner("not-default-provisioner"), + GetNewChannelClusterChannelProvisioner("not-default-provisioner"), }, }, { - Name: "clusterprovisioner not found", + Name: "clusterChannelProvisioner not found", InitialState: []runtime.Object{}, - ReconcileKey: fmt.Sprintf("%s/%s", testNS, clusterProvisionerName), + ReconcileKey: fmt.Sprintf("%s/%s", testNS, 
clusterChannelProvisionerName), WantResult: reconcile.Result{}, WantPresent: []runtime.Object{}, }, { - Name: "error fetching clusterprovisioner", + Name: "error fetching clusterChannelProvisioner", InitialState: []runtime.Object{ - GetNewChannelClusterProvisioner(clusterProvisionerName), + GetNewChannelClusterChannelProvisioner(clusterChannelProvisionerName), }, Mocks: mockFetchError, - ReconcileKey: fmt.Sprintf("%s/%s", testNS, clusterProvisionerName), + ReconcileKey: fmt.Sprintf("%s/%s", testNS, clusterChannelProvisionerName), WantErrMsg: "error fetching", WantPresent: []runtime.Object{ - GetNewChannelClusterProvisioner(clusterProvisionerName), + GetNewChannelClusterChannelProvisioner(clusterChannelProvisionerName), }, }, { - Name: "deleted clusterprovisioner", + Name: "deleted clusterChannelProvisioner", InitialState: []runtime.Object{ - GetNewChannelClusterProvisionerDeleted(clusterProvisionerName), + GetNewChannelClusterChannelProvisionerDeleted(clusterChannelProvisionerName), }, - ReconcileKey: fmt.Sprintf("%s/%s", testNS, clusterProvisionerName), + ReconcileKey: fmt.Sprintf("%s/%s", testNS, clusterChannelProvisionerName), WantResult: reconcile.Result{}, WantPresent: []runtime.Object{ - GetNewChannelClusterProvisionerDeleted(clusterProvisionerName), + GetNewChannelClusterChannelProvisionerDeleted(clusterChannelProvisionerName), }, }, } @@ -147,15 +146,15 @@ func TestAllCases(t *testing.T) { } } -func GetNewChannelClusterProvisioner(name string) *eventingv1alpha1.ClusterProvisioner { - return getNewClusterProvisioner(name, "Channel") +func GetNewChannelClusterChannelProvisioner(name string) *eventingv1alpha1.ClusterChannelProvisioner { + return getNewClusterChannelProvisioner(name, "Channel") } -func getNewClusterProvisioner(name string, reconcileKind string) *eventingv1alpha1.ClusterProvisioner { - clusterProvisioner := &eventingv1alpha1.ClusterProvisioner{ +func getNewClusterChannelProvisioner(name string, reconcileKind string) 
*eventingv1alpha1.ClusterChannelProvisioner { + clusterChannelProvisioner := &eventingv1alpha1.ClusterChannelProvisioner{ TypeMeta: ClusterProvisonerType(), ObjectMeta: om(testNS, name), - Spec: eventingv1alpha1.ClusterProvisionerSpec{ + Spec: eventingv1alpha1.ClusterChannelProvisionerSpec{ Reconciles: metav1.GroupKind{ Kind: reconcileKind, Group: eventing.GroupName, @@ -163,22 +162,22 @@ func getNewClusterProvisioner(name string, reconcileKind string) *eventingv1alph }, } // selflink is not filled in when we create the object, so clear it - clusterProvisioner.ObjectMeta.SelfLink = "" - return clusterProvisioner + clusterChannelProvisioner.ObjectMeta.SelfLink = "" + return clusterChannelProvisioner } -func GetNewChannelClusterProvisionerReady(name string) *eventingv1alpha1.ClusterProvisioner { - c := GetNewChannelClusterProvisioner(name) - c.Status = eventingv1alpha1.ClusterProvisionerStatus{ +func GetNewChannelClusterChannelProvisionerReady(name string) *eventingv1alpha1.ClusterChannelProvisioner { + c := GetNewChannelClusterChannelProvisioner(name) + c.Status = eventingv1alpha1.ClusterChannelProvisionerStatus{ Conditions: []duckv1alpha1.Condition{ - ClusterProvisionerConditionReady, + ClusterChannelProvisionerConditionReady, }, } return c } -func GetNewChannelClusterProvisionerDeleted(name string) *eventingv1alpha1.ClusterProvisioner { - c := GetNewChannelClusterProvisioner(name) +func GetNewChannelClusterChannelProvisionerDeleted(name string) *eventingv1alpha1.ClusterChannelProvisioner { + c := GetNewChannelClusterChannelProvisioner(name) deletedTime := metav1.Now().Rfc3339Copy() c.DeletionTimestamp = &deletedTime return c @@ -187,7 +186,7 @@ func GetNewChannelClusterProvisionerDeleted(name string) *eventingv1alpha1.Clust func ClusterProvisonerType() metav1.TypeMeta { return metav1.TypeMeta{ APIVersion: eventingv1alpha1.SchemeGroupVersion.String(), - Kind: "ClusterProvisioner", + Kind: "ClusterChannelProvisioner", } } @@ -201,7 +200,7 @@ func om(namespace, name 
string) metav1.ObjectMeta { func getControllerConfig() *KafkaProvisionerConfig { return &KafkaProvisionerConfig{ - Name: clusterProvisionerName, + Name: clusterChannelProvisionerName, Brokers: []string{"test-broker"}, } } diff --git a/pkg/provisioners/kafka/main.go b/pkg/provisioners/kafka/main.go index db1e7c64fb2..95d470cd783 100644 --- a/pkg/provisioners/kafka/main.go +++ b/pkg/provisioners/kafka/main.go @@ -6,6 +6,11 @@ import ( "os" "strings" + "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" + "github.com/knative/eventing/pkg/provisioners" + provisionerController "github.com/knative/eventing/pkg/provisioners/kafka/controller" + "github.com/knative/eventing/pkg/provisioners/kafka/controller/channel" + "github.com/knative/pkg/configmap" "go.uber.org/zap" "k8s.io/apimachinery/pkg/runtime" _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" @@ -14,17 +19,11 @@ import ( "sigs.k8s.io/controller-runtime/pkg/manager" logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" "sigs.k8s.io/controller-runtime/pkg/runtime/signals" - - "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" - "github.com/knative/eventing/pkg/provisioners" - provisionerController "github.com/knative/eventing/pkg/provisioners/kafka/controller" - "github.com/knative/eventing/pkg/provisioners/kafka/controller/channel" - "github.com/knative/pkg/configmap" ) const ( - ClusterProvisionerNameConfigMapKey = "cluster-provisioner-name" - BrokerConfigMapKey = "brokers" + ClusterChannelProvisionerNameConfigMapKey = "cluster-provisioner-name" + BrokerConfigMapKey = "brokers" ) // SchemeFunc adds types to a Scheme. 
@@ -79,7 +78,7 @@ func main() { mgr.Start(signals.SetupSignalHandler()) } -// getProvisionerConfig returns the details of the associated Provisioner/ClusterProvisioner object +// getProvisionerConfig returns the details of the associated Provisioner/ClusterChannelProvisioner object func getProvisionerConfig() (*provisionerController.KafkaProvisionerConfig, error) { configMap, err := configmap.Load("/etc/config-provisioner") if err != nil { @@ -92,10 +91,10 @@ func getProvisionerConfig() (*provisionerController.KafkaProvisionerConfig, erro config := &provisionerController.KafkaProvisionerConfig{} - if value, ok := configMap[ClusterProvisionerNameConfigMapKey]; ok { + if value, ok := configMap[ClusterChannelProvisionerNameConfigMapKey]; ok { config.Name = value } else { - return nil, fmt.Errorf("missing key %s in provisioner configuration", ClusterProvisionerNameConfigMapKey) + return nil, fmt.Errorf("missing key %s in provisioner configuration", ClusterChannelProvisionerNameConfigMapKey) } if value, ok := configMap[BrokerConfigMapKey]; ok { From b66ec8f8da10be8eb75f4c8ce3025d7362d5e18f Mon Sep 17 00:00:00 2001 From: Sabari Kumar Murugesan Date: Mon, 29 Oct 2018 16:24:49 -0700 Subject: [PATCH 12/20] Fix few more after pr 562 --- .../kafka/controller/channel/reconcile.go | 9 ++--- .../controller/channel/reconcile_test.go | 33 +++++++++---------- .../kafka/controller/reconcile.go | 6 ---- .../kafka/controller/reconcile_test.go | 19 +---------- 4 files changed, 21 insertions(+), 46 deletions(-) diff --git a/pkg/provisioners/kafka/controller/channel/reconcile.go b/pkg/provisioners/kafka/controller/channel/reconcile.go index 8ddcdf9bec4..e4991b6182f 100644 --- a/pkg/provisioners/kafka/controller/channel/reconcile.go +++ b/pkg/provisioners/kafka/controller/channel/reconcile.go @@ -59,23 +59,24 @@ func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, err } // Skip Channel as it is not targeting any provisioner - if channel.Spec.Provisioner == nil || 
channel.Spec.Provisioner.Ref == nil { + if channel.Spec.Provisioner == nil { return reconcile.Result{}, nil } // Skip channel not managed by this provisioner - provisionerRef := channel.Spec.Provisioner.Ref clusterChannelProvisioner, err := r.getClusterChannelProvisioner() if err != nil { return reconcile.Result{}, err } - + provisionerRef := channel.Spec.Provisioner if provisionerRef.Name != clusterChannelProvisioner.Name || provisionerRef.Namespace != clusterChannelProvisioner.Namespace { return reconcile.Result{}, nil } newChannel := channel.DeepCopy() + newChannel.Status.InitializeConditions() + if clusterChannelProvisioner.Status.IsReady() { // Reconcile this copy of the Channel and then write back any status // updates regardless of whether the reconcile error out. @@ -112,7 +113,7 @@ func (r *reconciler) reconcile(channel *v1alpha1.Channel) error { } r.addFinalizer(channel) - channel.Status.InitializeConditions() + if err := r.provisionChannel(channel); err != nil { channel.Status.MarkNotProvisioned("NotProvisioned", "error while provisioning: %s", err) return err diff --git a/pkg/provisioners/kafka/controller/channel/reconcile_test.go b/pkg/provisioners/kafka/controller/channel/reconcile_test.go index a304f747b68..0d461ac460c 100644 --- a/pkg/provisioners/kafka/controller/channel/reconcile_test.go +++ b/pkg/provisioners/kafka/controller/channel/reconcile_test.go @@ -24,11 +24,6 @@ import ( "github.com/Shopify/sarama" "github.com/ghodss/yaml" "github.com/google/go-cmp/cmp" - "github.com/knative/eventing/pkg/apis/eventing" - eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" - controllertesting "github.com/knative/eventing/pkg/controller/testing" - "github.com/knative/eventing/pkg/provisioners" - "github.com/knative/eventing/pkg/provisioners/kafka/controller" duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -37,6 +32,11 @@ import ( 
"k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" + + eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" + controllertesting "github.com/knative/eventing/pkg/controller/testing" + "github.com/knative/eventing/pkg/provisioners" + "github.com/knative/eventing/pkg/provisioners/kafka/controller" ) const ( @@ -153,7 +153,7 @@ var testCases = []controllertesting.TestCase{ }, ReconcileKey: fmt.Sprintf("%s/%s", testNS, channelName), WantResult: reconcile.Result{}, - WantErrMsg: "clusterChannelProvisioners.eventing.knative.dev \"" + clusterChannelProvisionerName + "\" not found", + WantErrMsg: "clusterchannelprovisioners.eventing.knative.dev \"" + clusterChannelProvisionerName + "\" not found", IgnoreTimes: true, }, { @@ -400,12 +400,10 @@ func getNewChannel(name, provisioner string) *eventingv1alpha1.Channel { TypeMeta: channelType(), ObjectMeta: om(testNS, name), Spec: eventingv1alpha1.ChannelSpec{ - Provisioner: &eventingv1alpha1.ProvisionerReference{ - Ref: &corev1.ObjectReference{ - Name: provisioner, - Kind: "ClusterChannelProvisioner", - APIVersion: eventingv1alpha1.SchemeGroupVersion.String(), - }, + Provisioner: &corev1.ObjectReference{ + Name: provisioner, + Kind: "ClusterChannelProvisioner", + APIVersion: eventingv1alpha1.SchemeGroupVersion.String(), }, }, } @@ -464,18 +462,17 @@ func getNewClusterChannelProvisioner(name string, isReady bool) *eventingv1alpha Kind: "ClusterChannelProvisioner", }, ObjectMeta: om("", name), - Spec: eventingv1alpha1.ClusterChannelProvisionerSpec{ - Reconciles: metav1.GroupKind{ - Kind: "Channel", - Group: eventing.GroupName, - }, - }, + Spec: eventingv1alpha1.ClusterChannelProvisionerSpec{}, Status: eventingv1alpha1.ClusterChannelProvisionerStatus{ Conditions: []duckv1alpha1.Condition{ { Type: eventingv1alpha1.ClusterChannelProvisionerConditionReady, Status: condStatus, }, + { + Type: eventingv1alpha1.ChannelConditionSinkable, + Status: 
"Unknown", + }, }, }, } diff --git a/pkg/provisioners/kafka/controller/reconcile.go b/pkg/provisioners/kafka/controller/reconcile.go index cb0c89f215d..afefe882cc6 100644 --- a/pkg/provisioners/kafka/controller/reconcile.go +++ b/pkg/provisioners/kafka/controller/reconcile.go @@ -20,7 +20,6 @@ import ( "context" "fmt" - "github.com/knative/eventing/pkg/apis/eventing" "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" "go.uber.org/zap" "k8s.io/apimachinery/pkg/api/equality" @@ -53,11 +52,6 @@ func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, err return reconcile.Result{}, nil } - // Only reconcile channel provisioners - if provisioner.Spec.Reconciles.Group != eventing.GroupName || provisioner.Spec.Reconciles.Kind != "Channel" { - return reconcile.Result{}, nil - } - original := provisioner.DeepCopy() // Reconcile this copy of the Provisioner and then write back any status diff --git a/pkg/provisioners/kafka/controller/reconcile_test.go b/pkg/provisioners/kafka/controller/reconcile_test.go index c21d26706ef..19c795367e2 100644 --- a/pkg/provisioners/kafka/controller/reconcile_test.go +++ b/pkg/provisioners/kafka/controller/reconcile_test.go @@ -21,7 +21,6 @@ import ( "fmt" "testing" - "github.com/knative/eventing/pkg/apis/eventing" eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" controllertesting "github.com/knative/eventing/pkg/controller/testing" "github.com/knative/eventing/pkg/provisioners" @@ -75,17 +74,6 @@ var testCases = []controllertesting.TestCase{ }, IgnoreTimes: true, }, - { - Name: "reconciles only channel kind", - InitialState: []runtime.Object{ - getNewClusterChannelProvisioner(clusterChannelProvisionerName, "Source"), - }, - ReconcileKey: fmt.Sprintf("%s/%s", testNS, clusterChannelProvisionerName), - WantResult: reconcile.Result{}, - WantPresent: []runtime.Object{ - getNewClusterChannelProvisioner(clusterChannelProvisionerName, "Source"), - }, - }, { Name: "reconciles only associated 
provisioner", InitialState: []runtime.Object{ @@ -154,12 +142,7 @@ func getNewClusterChannelProvisioner(name string, reconcileKind string) *eventin clusterChannelProvisioner := &eventingv1alpha1.ClusterChannelProvisioner{ TypeMeta: ClusterProvisonerType(), ObjectMeta: om(testNS, name), - Spec: eventingv1alpha1.ClusterChannelProvisionerSpec{ - Reconciles: metav1.GroupKind{ - Kind: reconcileKind, - Group: eventing.GroupName, - }, - }, + Spec: eventingv1alpha1.ClusterChannelProvisionerSpec{}, } // selflink is not filled in when we create the object, so clear it clusterChannelProvisioner.ObjectMeta.SelfLink = "" From e4131f49954aefcc3917a0c56819828041dba179 Mon Sep 17 00:00:00 2001 From: Sabari Kumar Murugesan Date: Mon, 29 Oct 2018 16:50:27 -0700 Subject: [PATCH 13/20] Fix tests and imports --- .../provisioners/kafka/kafka-provisioner.yaml | 5 +---- .../inmemory/channel/reconcile_test.go | 18 ++++++------------ .../clusterchannelprovisioner/reconcile.go | 8 ++++---- .../reconcile_test.go | 14 +++----------- .../kafka/controller/channel/reconcile.go | 6 +++--- .../kafka/controller/channel/reconcile_test.go | 6 +++--- 6 files changed, 20 insertions(+), 37 deletions(-) diff --git a/config/provisioners/kafka/kafka-provisioner.yaml b/config/provisioners/kafka/kafka-provisioner.yaml index 3ee163bba96..f439f193643 100644 --- a/config/provisioners/kafka/kafka-provisioner.yaml +++ b/config/provisioners/kafka/kafka-provisioner.yaml @@ -16,10 +16,7 @@ apiVersion: eventing.knative.dev/v1alpha1 kind: ClusterChannelProvisioner metadata: name: kafka-channel -spec: - reconciles: - group: eventing.knative.dev/v1alpha1 - kind: Channel +spec: {} --- apiVersion: v1 diff --git a/pkg/controller/eventing/inmemory/channel/reconcile_test.go b/pkg/controller/eventing/inmemory/channel/reconcile_test.go index bde7b039744..f3a3bda8df0 100644 --- a/pkg/controller/eventing/inmemory/channel/reconcile_test.go +++ b/pkg/controller/eventing/inmemory/channel/reconcile_test.go @@ -111,10 +111,8 @@ var ( 
Kind: "Channel", }, Spec: eventingv1alpha1.ChannelSpec{ - Provisioner: &eventingv1alpha1.ProvisionerReference{ - Ref: &corev1.ObjectReference{ - Name: ccpName, - }, + Provisioner: &corev1.ObjectReference{ + Name: ccpName, }, Channelable: &eventingduck.Channelable{ Subscribers: []eventingduck.ChannelSubscriberSpec{ @@ -162,10 +160,8 @@ var ( Kind: "Channel", }, Spec: eventingv1alpha1.ChannelSpec{ - Provisioner: &eventingv1alpha1.ProvisionerReference{ - Ref: &corev1.ObjectReference{ - Name: ccpName, - }, + Provisioner: &corev1.ObjectReference{ + Name: ccpName, }, Channelable: &eventingduck.Channelable{ Subscribers: []eventingduck.ChannelSubscriberSpec{ @@ -470,10 +466,8 @@ func makeChannel() *eventingv1alpha1.Channel { UID: cUID, }, Spec: eventingv1alpha1.ChannelSpec{ - Provisioner: &eventingv1alpha1.ProvisionerReference{ - Ref: &corev1.ObjectReference{ - Name: ccpName, - }, + Provisioner: &corev1.ObjectReference{ + Name: ccpName, }, }, } diff --git a/pkg/controller/eventing/inmemory/clusterchannelprovisioner/reconcile.go b/pkg/controller/eventing/inmemory/clusterchannelprovisioner/reconcile.go index 2fc32debef5..ce6ac719695 100644 --- a/pkg/controller/eventing/inmemory/clusterchannelprovisioner/reconcile.go +++ b/pkg/controller/eventing/inmemory/clusterchannelprovisioner/reconcile.go @@ -105,9 +105,9 @@ func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, err // IsControlled determines if the in-memory Channel Controller should control (and therefore // reconcile) a given object, based on that object's ClusterChannelProvisioner reference. 
-func IsControlled(ref *eventingv1alpha1.ProvisionerReference) bool { - if ref != nil && ref.Ref != nil { - return shouldReconcile(ref.Ref.Namespace, ref.Ref.Name) +func IsControlled(ref *corev1.ObjectReference) bool { + if ref != nil { + return shouldReconcile(ref.Namespace, ref.Name) } return false } @@ -214,6 +214,6 @@ func newDispatcherService(ccp *eventingv1alpha1.ClusterChannelProvisioner) *core func dispatcherLabels(ccpName string) map[string]string { return map[string]string{ "clusterChannelProvisioner": ccpName, - "role": "dispatcher", + "role": "dispatcher", } } diff --git a/pkg/controller/eventing/inmemory/clusterchannelprovisioner/reconcile_test.go b/pkg/controller/eventing/inmemory/clusterchannelprovisioner/reconcile_test.go index 099e85c6a90..b4f9945cc2f 100644 --- a/pkg/controller/eventing/inmemory/clusterchannelprovisioner/reconcile_test.go +++ b/pkg/controller/eventing/inmemory/clusterchannelprovisioner/reconcile_test.go @@ -75,19 +75,13 @@ func TestInjectClient(t *testing.T) { func TestIsControlled(t *testing.T) { testCases := map[string]struct { - ref *eventingv1alpha1.ProvisionerReference + ref *corev1.ObjectReference isControlled bool }{ "nil": { ref: nil, isControlled: false, }, - "ref nil": { - ref: &eventingv1alpha1.ProvisionerReference{ - Ref: nil, - }, - isControlled: false, - }, "wrong namespace": { ref: &corev1.ObjectReference{ Namespace: "other", @@ -96,10 +90,8 @@ func TestIsControlled(t *testing.T) { isControlled: false, }, "wrong name": { - ref: &eventingv1alpha1.ProvisionerReference{ - Ref: &corev1.ObjectReference{ - Name: "other-name", - }, + ref: &corev1.ObjectReference{ + Name: "other-name", }, isControlled: false, }, diff --git a/pkg/provisioners/kafka/controller/channel/reconcile.go b/pkg/provisioners/kafka/controller/channel/reconcile.go index e4991b6182f..63713d3b2f0 100644 --- a/pkg/provisioners/kafka/controller/channel/reconcile.go +++ b/pkg/provisioners/kafka/controller/channel/reconcile.go @@ -18,10 +18,10 @@ package 
channel import ( "context" + "encoding/json" "fmt" "github.com/Shopify/sarama" - "github.com/ghodss/yaml" "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" "go.uber.org/zap" "k8s.io/apimachinery/pkg/api/equality" @@ -227,8 +227,8 @@ func topicName(channel *v1alpha1.Channel) string { func unmarshalArguments(bytes []byte) (map[string]interface{}, error) { arguments := make(map[string]interface{}) if len(bytes) > 0 { - if err := yaml.Unmarshal(bytes, &arguments); err != nil { - return nil, err + if err := json.Unmarshal(bytes, &arguments); err != nil { + return nil, fmt.Errorf("error unmarshalling arguments: %s", err) } } return arguments, nil diff --git a/pkg/provisioners/kafka/controller/channel/reconcile_test.go b/pkg/provisioners/kafka/controller/channel/reconcile_test.go index 0d461ac460c..d677a46ac09 100644 --- a/pkg/provisioners/kafka/controller/channel/reconcile_test.go +++ b/pkg/provisioners/kafka/controller/channel/reconcile_test.go @@ -18,11 +18,11 @@ package channel import ( "context" + "encoding/json" "fmt" "testing" "github.com/Shopify/sarama" - "github.com/ghodss/yaml" "github.com/google/go-cmp/cmp" duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1" corev1 "k8s.io/api/core/v1" @@ -276,7 +276,7 @@ func TestProvisionChannel(t *testing.T) { } return channel }(), - wantError: "error unmarshaling JSON: json: cannot unmarshal string into Go value of type map[string]interface {}", + wantError: "error unmarshalling arguments: invalid character 'i' looking for beginning of value", }, { name: "provision with valid channel arguments", @@ -414,7 +414,7 @@ func getNewChannel(name, provisioner string) *eventingv1alpha1.Channel { func getNewChannelWithArgs(name string, args map[string]interface{}) *eventingv1alpha1.Channel { c := getNewChannelNoProvisioner(name) - bytes, _ := yaml.Marshal(args) + bytes, _ := json.Marshal(args) c.Spec.Arguments = &runtime.RawExtension{ Raw: bytes, } From 712fbc403964f99802d7f51bc461b0fb59797a53 Mon Sep 17 00:00:00 2001 
From: Sabari Kumar Murugesan Date: Wed, 31 Oct 2018 11:33:16 -0700 Subject: [PATCH 14/20] PR feedback for removing ClusterChannelProvisioner name from configmap There were some concerns in fetching the provisioner name from a config map. --- config/provisioners/kafka/kafka-provisioner.yaml | 2 -- pkg/provisioners/kafka/controller/channel/reconcile.go | 6 ++++-- .../kafka/controller/channel/reconcile_test.go | 3 +-- pkg/provisioners/kafka/controller/reconcile.go | 8 +++++++- pkg/provisioners/kafka/controller/reconcile_test.go | 3 +-- pkg/provisioners/kafka/controller/types.go | 1 - pkg/provisioners/kafka/main.go | 9 +-------- 7 files changed, 14 insertions(+), 18 deletions(-) diff --git a/config/provisioners/kafka/kafka-provisioner.yaml b/config/provisioners/kafka/kafka-provisioner.yaml index f439f193643..821dbc7bd0a 100644 --- a/config/provisioners/kafka/kafka-provisioner.yaml +++ b/config/provisioners/kafka/kafka-provisioner.yaml @@ -56,8 +56,6 @@ metadata: name: kafka-channel-controller-config namespace: knative-eventing data: - # Name of the provisioner that this controller represents - cluster-provisioner-name: kafka # Broker URL's for the provisioner brokers: kafkabroker.kafka:9092 --- diff --git a/pkg/provisioners/kafka/controller/channel/reconcile.go b/pkg/provisioners/kafka/controller/channel/reconcile.go index 63713d3b2f0..420a5591f27 100644 --- a/pkg/provisioners/kafka/controller/channel/reconcile.go +++ b/pkg/provisioners/kafka/controller/channel/reconcile.go @@ -30,6 +30,8 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/knative/eventing/pkg/provisioners/kafka/controller" ) const ( @@ -69,7 +71,7 @@ func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, err return reconcile.Result{}, err } provisionerRef := channel.Spec.Provisioner - if provisionerRef.Name != clusterChannelProvisioner.Name || provisionerRef.Namespace != 
clusterChannelProvisioner.Namespace { + if provisionerRef.Name != clusterChannelProvisioner.Name { return reconcile.Result{}, nil } @@ -175,7 +177,7 @@ func (r *reconciler) deprovisionChannel(channel *v1alpha1.Channel) error { func (r *reconciler) getClusterChannelProvisioner() (*v1alpha1.ClusterChannelProvisioner, error) { clusterChannelProvisioner := &v1alpha1.ClusterChannelProvisioner{} objKey := client.ObjectKey{ - Name: r.config.Name, + Name: controller.Name, } if err := r.client.Get(context.Background(), objKey, clusterChannelProvisioner); err != nil { return nil, err diff --git a/pkg/provisioners/kafka/controller/channel/reconcile_test.go b/pkg/provisioners/kafka/controller/channel/reconcile_test.go index d677a46ac09..f2abf05ccb2 100644 --- a/pkg/provisioners/kafka/controller/channel/reconcile_test.go +++ b/pkg/provisioners/kafka/controller/channel/reconcile_test.go @@ -41,7 +41,7 @@ import ( const ( channelName = "test-channel" - clusterChannelProvisionerName = "kafka" + clusterChannelProvisionerName = "kafka-channel" testNS = "test-namespace" ) @@ -491,7 +491,6 @@ func om(namespace, name string) metav1.ObjectMeta { func getControllerConfig() *controller.KafkaProvisionerConfig { return &controller.KafkaProvisionerConfig{ - Name: clusterChannelProvisionerName, Brokers: []string{"test-broker"}, } } diff --git a/pkg/provisioners/kafka/controller/reconcile.go b/pkg/provisioners/kafka/controller/reconcile.go index afefe882cc6..0f72d7fc4a0 100644 --- a/pkg/provisioners/kafka/controller/reconcile.go +++ b/pkg/provisioners/kafka/controller/reconcile.go @@ -29,6 +29,11 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" ) +const ( + // Name is the name of the kafka ClusterChannelProvisioner. + Name = "kafka-channel" +) + // Reconcile compares the actual state with the desired, and attempts to // converge the two. It then updates the Status block of the Provisioner resource // with the current status of the resource. 
@@ -48,7 +53,8 @@ func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, err } // Skip channel provisioners that we don't manage - if provisioner.Name != r.config.Name { + if provisioner.Name != Name { + r.logger.Info("not reconciling ClusterChannelProvisioner, it is not controlled by this Controller", zap.Any("request", request)) return reconcile.Result{}, nil } diff --git a/pkg/provisioners/kafka/controller/reconcile_test.go b/pkg/provisioners/kafka/controller/reconcile_test.go index 19c795367e2..e7ab3eea676 100644 --- a/pkg/provisioners/kafka/controller/reconcile_test.go +++ b/pkg/provisioners/kafka/controller/reconcile_test.go @@ -35,7 +35,7 @@ import ( ) const ( - clusterChannelProvisionerName = "kafka" + clusterChannelProvisionerName = "kafka-channel" testNS = "" ) @@ -183,7 +183,6 @@ func om(namespace, name string) metav1.ObjectMeta { func getControllerConfig() *KafkaProvisionerConfig { return &KafkaProvisionerConfig{ - Name: clusterChannelProvisionerName, Brokers: []string{"test-broker"}, } } diff --git a/pkg/provisioners/kafka/controller/types.go b/pkg/provisioners/kafka/controller/types.go index 620063ff2bd..46a85cea400 100644 --- a/pkg/provisioners/kafka/controller/types.go +++ b/pkg/provisioners/kafka/controller/types.go @@ -1,6 +1,5 @@ package controller type KafkaProvisionerConfig struct { - Name string Brokers []string } diff --git a/pkg/provisioners/kafka/main.go b/pkg/provisioners/kafka/main.go index 95d470cd783..fa4f88623c1 100644 --- a/pkg/provisioners/kafka/main.go +++ b/pkg/provisioners/kafka/main.go @@ -22,8 +22,7 @@ import ( ) const ( - ClusterChannelProvisionerNameConfigMapKey = "cluster-provisioner-name" - BrokerConfigMapKey = "brokers" + BrokerConfigMapKey = "brokers" ) // SchemeFunc adds types to a Scheme. 
@@ -91,12 +90,6 @@ func getProvisionerConfig() (*provisionerController.KafkaProvisionerConfig, erro config := &provisionerController.KafkaProvisionerConfig{} - if value, ok := configMap[ClusterChannelProvisionerNameConfigMapKey]; ok { - config.Name = value - } else { - return nil, fmt.Errorf("missing key %s in provisioner configuration", ClusterChannelProvisionerNameConfigMapKey) - } - if value, ok := configMap[BrokerConfigMapKey]; ok { brokers := strings.Split(value, ",") if len(brokers) == 0 { From c5dc9697bb211c4ff85505f2d7030c19962a4d40 Mon Sep 17 00:00:00 2001 From: Matthias Wessendorf Date: Mon, 5 Nov 2018 14:02:42 +0100 Subject: [PATCH 15/20] Adding instructions for Channel provisioner --- config/provisioners/kafka/README.md | 38 +++++++++++++++++++ config/provisioners/kafka/broker/README.md | 4 +- .../provisioners/kafka/kafka-provisioner.yaml | 2 +- config/provisioners/kafka/strimzi/README.md | 31 +++++++++++++++ .../kafka/strimzi/kafka-ephemeral.yaml | 23 +++++++++++ .../kafka/strimzi/kafka-persistent.yaml | 27 +++++++++++++ 6 files changed, 122 insertions(+), 3 deletions(-) create mode 100644 config/provisioners/kafka/README.md create mode 100644 config/provisioners/kafka/strimzi/README.md create mode 100644 config/provisioners/kafka/strimzi/kafka-ephemeral.yaml create mode 100644 config/provisioners/kafka/strimzi/kafka-persistent.yaml diff --git a/config/provisioners/kafka/README.md b/config/provisioners/kafka/README.md new file mode 100644 index 00000000000..cf9bb522541 --- /dev/null +++ b/config/provisioners/kafka/README.md @@ -0,0 +1,38 @@ +# Apache Kafka Channels + +Deployment steps: +1. Setup [Knative Eventing](../../../DEVELOPMENT.md) +1. Install an Apache Kafka cluster. There are two choices: + * Simple installation of [Apache Kafka](broker). + * A production grade installation using the [Strimzi Kafka Operator](strimzi). + +1. 
Now that the Apache Kafka is installed, apply the 'Kafka' ClusterChannelProvisioner: + ``` + ko apply -f config/provisioners/kafka/kafka-provisioner.yaml + ``` + > Note: If you are using Strimzi, you need to update the `KAFKA_BOOTSTRAP_SERVERS` value in the `kafka-channel-controller-config` ConfigMap to `my-cluster-kafka-bootstrap.kafka.9092`. +1. Create Channels that reference the 'kafka-channel'. + + ```yaml + apiVersion: eventing.knative.dev/v1alpha1 + kind: Channel + metadata: + name: my-kafka-channel + spec: + provisioner: + apiVersion: eventing.knative.dev/v1alpha1 + kind: ClusterChannelProvisioner + name: kafka-channel + ``` +1. (Optional) Install [Kail](https://github.com/boz/kail) - Kubernetes tail + +## Components + +The major components are: +* ClusterChannelProvisioner Controller +* Channel Controller Config Map + +The ClusterChannelProvisioner Controller and the Channel Controller are colocated in one Pod. +```shell +kubectl get deployment -n knative-eventing kafka-channel-controller +``` diff --git a/config/provisioners/kafka/broker/README.md b/config/provisioners/kafka/broker/README.md index c8b6da84cbe..0c75dde69b5 100644 --- a/config/provisioners/kafka/broker/README.md +++ b/config/provisioners/kafka/broker/README.md @@ -1,4 +1,4 @@ -# Apache Kakfa - simple installation +# Apache Kafka - simple installation 1. For an installation of a simple Apache Kafka cluster, a setup is provided: ``` @@ -10,4 +10,4 @@ oc adm policy add-scc-to-user anyuid -z default -n kafka ``` -Continue the configuration of Knative Eventing with [step `3`](../). \ No newline at end of file +Continue the configuration of Knative Eventing with [step `3`](../). 
diff --git a/config/provisioners/kafka/kafka-provisioner.yaml b/config/provisioners/kafka/kafka-provisioner.yaml index 821dbc7bd0a..746e39ab341 100644 --- a/config/provisioners/kafka/kafka-provisioner.yaml +++ b/config/provisioners/kafka/kafka-provisioner.yaml @@ -57,7 +57,7 @@ metadata: namespace: knative-eventing data: # Broker URL's for the provisioner - brokers: kafkabroker.kafka:9092 + bootstrap_servers: kafkabroker.kafka:9092 --- apiVersion: apps/v1beta1 diff --git a/config/provisioners/kafka/strimzi/README.md b/config/provisioners/kafka/strimzi/README.md new file mode 100644 index 00000000000..efd7d5437df --- /dev/null +++ b/config/provisioners/kafka/strimzi/README.md @@ -0,0 +1,31 @@ +# Strimzi - Apache Kafka Operator + +[Strimzi](http://strimzi.io) makes it easy to run a production grade Apache Kafka installation on OpenShift or Kubernetes. It implements the _Kubernetes Operator pattern_ for mananging `clusters`, `topics` or `users` based on custom resource files. + +Installing the Strimzi Cluster Operator is simple and requires only a few steps. + +1. Create the `kafka` namespace in your Kubernetes cluster: + ``` + kubectl create namespace kafka + ``` + +1. Install the Strimzi _Cluster Operator_: + + * Applying yaml files from the [Strimzi release bundle](https://github.com/strimzi/strimzi-kafka-operator/releases/latest) + * Using the Strimzi Helm Chart + + Both ways for installing the _Cluster Operator_ are described in the [Strimzi documentation](http://strimzi.io/docs/master/#cluster-operator-str) itself + + > Note: Once this is done, you will have a `strimzi-cluster-operator` pod, which is able to install the Apache Kafka broker based on a `Kafka` custom resource file. + +1. Install the Apache Kafka cluster by providing the `kafka-persistent.yaml` Strimzi resource file from _this_ folder: + ``` + kubectl apply -f kafka-persistent.yaml -n kafka + ``` + > Note: If you want to use ephemeral storage, you have to use the `kafka-ephemeral.yaml` file. 
+ + This provisions the complete installation of your Apache Kafka cluster. + +> Note: For learning more about Strimiz, please consult its [website](http://strimzi.io). + +Continue the configuration of Knative Eventing with [step `3`](../). diff --git a/config/provisioners/kafka/strimzi/kafka-ephemeral.yaml b/config/provisioners/kafka/strimzi/kafka-ephemeral.yaml new file mode 100644 index 00000000000..6423bd39de9 --- /dev/null +++ b/config/provisioners/kafka/strimzi/kafka-ephemeral.yaml @@ -0,0 +1,23 @@ +apiVersion: kafka.strimzi.io/v1alpha1 +kind: Kafka +metadata: + name: my-cluster +spec: + kafka: + replicas: 1 + listeners: + plain: {} + tls: {} + config: + offsets.topic.replication.factor: 3 + transaction.state.log.replication.factor: 3 + transaction.state.log.min.isr: 2 + storage: + type: ephemeral + zookeeper: + replicas: 1 + storage: + type: ephemeral + entityOperator: + topicOperator: {} + userOperator: {} diff --git a/config/provisioners/kafka/strimzi/kafka-persistent.yaml b/config/provisioners/kafka/strimzi/kafka-persistent.yaml new file mode 100644 index 00000000000..ea5fd60ce4d --- /dev/null +++ b/config/provisioners/kafka/strimzi/kafka-persistent.yaml @@ -0,0 +1,27 @@ +apiVersion: kafka.strimzi.io/v1alpha1 +kind: Kafka +metadata: + name: my-cluster +spec: + kafka: + replicas: 1 + listeners: + plain: {} + tls: {} + config: + offsets.topic.replication.factor: 3 + transaction.state.log.replication.factor: 3 + transaction.state.log.min.isr: 2 + storage: + type: persistent-claim + size: 1Gi + deleteClaim: false + zookeeper: + replicas: 1 + storage: + type: persistent-claim + size: 1Gi + deleteClaim: false + entityOperator: + topicOperator: {} + userOperator: {} From 9934290a267b065608d357682823bf063d57c797 Mon Sep 17 00:00:00 2001 From: Matthias Wessendorf Date: Mon, 5 Nov 2018 14:03:23 +0100 Subject: [PATCH 16/20] short cut code for missing ... 
--- pkg/provisioners/kafka/main.go | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/pkg/provisioners/kafka/main.go b/pkg/provisioners/kafka/main.go index fa4f88623c1..6da4e1932fa 100644 --- a/pkg/provisioners/kafka/main.go +++ b/pkg/provisioners/kafka/main.go @@ -22,7 +22,7 @@ import ( ) const ( - BrokerConfigMapKey = "brokers" + BrokerConfigMapKey = "bootstrap_servers" ) // SchemeFunc adds types to a Scheme. @@ -91,12 +91,11 @@ func getProvisionerConfig() (*provisionerController.KafkaProvisionerConfig, erro config := &provisionerController.KafkaProvisionerConfig{} if value, ok := configMap[BrokerConfigMapKey]; ok { - brokers := strings.Split(value, ",") - if len(brokers) == 0 { - return nil, fmt.Errorf("missing kafka brokers in provisioner configuration") + bootstrapServers := strings.Split(value, ",") + if len(bootstrapServers) != 0 { + config.Brokers = bootstrapServers + return config, nil } - config.Brokers = brokers - return config, nil } return nil, fmt.Errorf("missing key %s in provisioner configuration", BrokerConfigMapKey) From 8a54f24c253ff319e671785cef90aa9bb6fd256a Mon Sep 17 00:00:00 2001 From: Matthias Wessendorf Date: Mon, 5 Nov 2018 15:38:55 +0100 Subject: [PATCH 17/20] Updating to latest Kafka client, and fixing idle bug --- Gopkg.lock | 5 +- Gopkg.toml | 3 +- .../kafka/controller/channel/provider.go | 16 - .../kafka/controller/channel/reconcile.go | 30 +- vendor/github.com/Shopify/sarama/admin.go | 11 +- .../Shopify/sarama/balance_strategy.go | 129 + vendor/github.com/Shopify/sarama/broker.go | 1 + vendor/github.com/Shopify/sarama/client.go | 42 +- vendor/github.com/Shopify/sarama/config.go | 119 +- .../Shopify/sarama/consumer_group.go | 774 ++++++ .../Shopify/sarama/fetch_response.go | 23 +- .../github.com/Shopify/sarama/length_field.go | 15 +- .../github.com/Shopify/sarama/message_set.go | 8 +- .../Shopify/sarama/metadata_request.go | 2 +- .../Shopify/sarama/metadata_response.go | 11 + 
.../Shopify/sarama/offset_manager.go | 662 ++--- vendor/github.com/Shopify/sarama/records.go | 21 + vendor/github.com/Shopify/sarama/utils.go | 4 +- .../.github/pull-request-template.md | 7 + .../knative/test-infra/CONTRIBUTING.md | 3 + .../github.com/knative/test-infra/Gopkg.lock | 28 + .../github.com/knative/test-infra/Gopkg.toml | 14 + vendor/github.com/knative/test-infra/LICENSE | 202 ++ vendor/github.com/knative/test-infra/OWNERS | 7 + .../github.com/knative/test-infra/README.md | 17 + .../github.com/knative/test-infra/WORKSPACE | 52 + .../knative/test-infra/ci/README.md | 3 + .../knative/test-infra/ci/gubernator/Makefile | 33 + .../test-infra/ci/gubernator/README.md | 7 + .../test-infra/ci/gubernator/config.yaml | 71 + .../test-infra/ci/gubernator/redir_github.py | 25 + .../knative/test-infra/ci/prow/Makefile | 42 + .../knative/test-infra/ci/prow/README.md | 10 + .../test-infra/ci/prow/boskos/README.md | 6 + .../test-infra/ci/prow/boskos/config.yaml | 152 ++ .../ci/prow/boskos/config_start.yaml | 23 + .../test-infra/ci/prow/boskos/resources.yaml | 38 + .../knative/test-infra/ci/prow/cluster.yaml | 350 +++ .../knative/test-infra/ci/prow/config.yaml | 2211 +++++++++++++++++ .../test-infra/ci/prow/config_start.yaml | 339 +++ .../knative/test-infra/ci/prow/plugins.yaml | 41 + .../knative/test-infra/ci/prow/prow_setup.md | 71 + .../knative/test-infra/ci/testgrid/Makefile | 29 + .../knative/test-infra/ci/testgrid/README.md | 6 + .../test-infra/ci/testgrid/config.yaml | 213 ++ vendor/github.com/knative/test-infra/dummy.go | 10 + .../knative/test-infra/images/README.md | 3 + .../test-infra/images/apicoverage/Dockerfile | 20 + .../test-infra/images/apicoverage/Makefile | 23 + .../test-infra/images/apicoverage/README.md | 3 + .../test-infra/images/prow-tests/Dockerfile | 56 + .../test-infra/images/prow-tests/Makefile | 34 + .../test-infra/images/prow-tests/README.md | 13 + .../knative/test-infra/test/e2e-tests.sh | 50 + .../test-infra/test/presubmit-tests.sh | 49 + 
.../test/unit/e2e-custom-flag-tests.sh | 38 + .../test-infra/test/unit/library-tests.sh | 50 + ...presubmit-full-custom-integration-tests.sh | 28 + .../presubmit-integration-tests-common.sh | 48 + ...submit-partial-custom-integration-tests.sh | 33 + .../test-infra/test/unit/release-tests.sh | 73 + .../knative/test-infra/tools/README.md | 3 + .../test-infra/tools/apicoverage/README.md | 14 + .../tools/apicoverage/apicoverage.go | 241 ++ .../test-infra/tools/dep-collector/README.md | 88 + .../test-infra/tools/dep-collector/imports.go | 94 + .../tools/dep-collector/licenses.go | 203 ++ .../test-infra/tools/dep-collector/main.go | 81 + .../knative/test-infra/tools/gcs/gcs.go | 112 + .../test-infra/tools/githubhelper/Makefile | 17 + .../test-infra/tools/githubhelper/README.md | 10 + .../tools/githubhelper/githubhelper.go | 85 + .../test-infra/tools/testgrid/testgrid.go | 69 + 73 files changed, 7048 insertions(+), 376 deletions(-) create mode 100644 vendor/github.com/Shopify/sarama/balance_strategy.go create mode 100644 vendor/github.com/Shopify/sarama/consumer_group.go create mode 100644 vendor/github.com/knative/test-infra/.github/pull-request-template.md create mode 100644 vendor/github.com/knative/test-infra/CONTRIBUTING.md create mode 100644 vendor/github.com/knative/test-infra/Gopkg.lock create mode 100644 vendor/github.com/knative/test-infra/Gopkg.toml create mode 100644 vendor/github.com/knative/test-infra/LICENSE create mode 100644 vendor/github.com/knative/test-infra/OWNERS create mode 100644 vendor/github.com/knative/test-infra/README.md create mode 100644 vendor/github.com/knative/test-infra/WORKSPACE create mode 100644 vendor/github.com/knative/test-infra/ci/README.md create mode 100644 vendor/github.com/knative/test-infra/ci/gubernator/Makefile create mode 100644 vendor/github.com/knative/test-infra/ci/gubernator/README.md create mode 100644 vendor/github.com/knative/test-infra/ci/gubernator/config.yaml create mode 100644 
vendor/github.com/knative/test-infra/ci/gubernator/redir_github.py create mode 100644 vendor/github.com/knative/test-infra/ci/prow/Makefile create mode 100644 vendor/github.com/knative/test-infra/ci/prow/README.md create mode 100644 vendor/github.com/knative/test-infra/ci/prow/boskos/README.md create mode 100644 vendor/github.com/knative/test-infra/ci/prow/boskos/config.yaml create mode 100644 vendor/github.com/knative/test-infra/ci/prow/boskos/config_start.yaml create mode 100644 vendor/github.com/knative/test-infra/ci/prow/boskos/resources.yaml create mode 100644 vendor/github.com/knative/test-infra/ci/prow/cluster.yaml create mode 100644 vendor/github.com/knative/test-infra/ci/prow/config.yaml create mode 100644 vendor/github.com/knative/test-infra/ci/prow/config_start.yaml create mode 100644 vendor/github.com/knative/test-infra/ci/prow/plugins.yaml create mode 100644 vendor/github.com/knative/test-infra/ci/prow/prow_setup.md create mode 100644 vendor/github.com/knative/test-infra/ci/testgrid/Makefile create mode 100644 vendor/github.com/knative/test-infra/ci/testgrid/README.md create mode 100644 vendor/github.com/knative/test-infra/ci/testgrid/config.yaml create mode 100644 vendor/github.com/knative/test-infra/dummy.go create mode 100644 vendor/github.com/knative/test-infra/images/README.md create mode 100644 vendor/github.com/knative/test-infra/images/apicoverage/Dockerfile create mode 100644 vendor/github.com/knative/test-infra/images/apicoverage/Makefile create mode 100644 vendor/github.com/knative/test-infra/images/apicoverage/README.md create mode 100644 vendor/github.com/knative/test-infra/images/prow-tests/Dockerfile create mode 100644 vendor/github.com/knative/test-infra/images/prow-tests/Makefile create mode 100644 vendor/github.com/knative/test-infra/images/prow-tests/README.md create mode 100755 vendor/github.com/knative/test-infra/test/e2e-tests.sh create mode 100755 vendor/github.com/knative/test-infra/test/presubmit-tests.sh create mode 100755 
vendor/github.com/knative/test-infra/test/unit/e2e-custom-flag-tests.sh create mode 100755 vendor/github.com/knative/test-infra/test/unit/library-tests.sh create mode 100755 vendor/github.com/knative/test-infra/test/unit/presubmit-full-custom-integration-tests.sh create mode 100755 vendor/github.com/knative/test-infra/test/unit/presubmit-integration-tests-common.sh create mode 100755 vendor/github.com/knative/test-infra/test/unit/presubmit-partial-custom-integration-tests.sh create mode 100755 vendor/github.com/knative/test-infra/test/unit/release-tests.sh create mode 100644 vendor/github.com/knative/test-infra/tools/README.md create mode 100644 vendor/github.com/knative/test-infra/tools/apicoverage/README.md create mode 100644 vendor/github.com/knative/test-infra/tools/apicoverage/apicoverage.go create mode 100644 vendor/github.com/knative/test-infra/tools/dep-collector/README.md create mode 100644 vendor/github.com/knative/test-infra/tools/dep-collector/imports.go create mode 100644 vendor/github.com/knative/test-infra/tools/dep-collector/licenses.go create mode 100644 vendor/github.com/knative/test-infra/tools/dep-collector/main.go create mode 100644 vendor/github.com/knative/test-infra/tools/gcs/gcs.go create mode 100644 vendor/github.com/knative/test-infra/tools/githubhelper/Makefile create mode 100644 vendor/github.com/knative/test-infra/tools/githubhelper/README.md create mode 100644 vendor/github.com/knative/test-infra/tools/githubhelper/githubhelper.go create mode 100644 vendor/github.com/knative/test-infra/tools/testgrid/testgrid.go diff --git a/Gopkg.lock b/Gopkg.lock index c63559599f7..8126e4e1f11 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -17,11 +17,12 @@ revision = "90f2606161ee6a14efe2ca79fc05ac2b8efe250b" [[projects]] - digest = "1:f2bb07cb70ceaecfffa034919e418793eef0960480474c276173b725449fdb1f" + digest = "1:a074ae0f4788ea4c4c7045ab37f21943920bc20cf6ff8afcb2d971154cfa87ab" name = "github.com/Shopify/sarama" packages = ["."] pruneopts = "NUT" - 
revision = "46cf3e2cf1acef7876068f66cf69ec31aad2d0b2" + revision = "ec843464b50d4c8b56403ec9d589cf41ea30e722" + version = "v1.19.0" [[projects]] branch = "master" diff --git a/Gopkg.toml b/Gopkg.toml index a03659e1eb1..c2222020dca 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -85,8 +85,7 @@ required = [ [[override]] name = "github.com/Shopify/sarama" - revision = "46cf3e2cf1acef7876068f66cf69ec31aad2d0b2" # includes higher level admin client -# version = "1.17.0" + version = "1.19.0" [[constraint]] name = "github.com/bsm/sarama-cluster" diff --git a/pkg/provisioners/kafka/controller/channel/provider.go b/pkg/provisioners/kafka/controller/channel/provider.go index 07e6ea4ca12..7ffdfcd21b1 100644 --- a/pkg/provisioners/kafka/controller/channel/provider.go +++ b/pkg/provisioners/kafka/controller/channel/provider.go @@ -17,9 +17,6 @@ limitations under the License. package channel import ( - "fmt" - - "github.com/Shopify/sarama" "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" common "github.com/knative/eventing/pkg/provisioners/kafka/controller" "go.uber.org/zap" @@ -43,7 +40,6 @@ type reconciler struct { recorder record.EventRecorder logger *zap.Logger config *common.KafkaProvisionerConfig - kafkaClusterAdmin sarama.ClusterAdmin } // Verify the struct implements reconcile.Reconciler @@ -52,16 +48,11 @@ var _ reconcile.Reconciler = &reconciler{} // ProvideController returns a Channel controller. func ProvideController(mgr manager.Manager, config *common.KafkaProvisionerConfig, logger *zap.Logger) (controller.Controller, error) { // Setup a new controller to Reconcile Channel. 
- clusterAdmin, err := getKafkaAdminClient(config) - if err != nil { - return nil, fmt.Errorf("unable to build kafka admin client: %s", err) - } c, err := controller.New(controllerAgentName, mgr, controller.Options{ Reconciler: &reconciler{ recorder: mgr.GetRecorder(controllerAgentName), logger: logger, config: config, - kafkaClusterAdmin: clusterAdmin, }, }) if err != nil { @@ -76,13 +67,6 @@ func ProvideController(mgr manager.Manager, config *common.KafkaProvisionerConfi return c, nil } -func getKafkaAdminClient(config *common.KafkaProvisionerConfig) (sarama.ClusterAdmin, error) { - saramaConf := sarama.NewConfig() - saramaConf.Version = sarama.V1_1_0_0 - saramaConf.ClientID = controllerAgentName - return sarama.NewClusterAdmin(config.Brokers, saramaConf) -} - func (r *reconciler) InjectClient(c client.Client) error { r.client = c return nil diff --git a/pkg/provisioners/kafka/controller/channel/reconcile.go b/pkg/provisioners/kafka/controller/channel/reconcile.go index 420a5591f27..8e19a5875ce 100644 --- a/pkg/provisioners/kafka/controller/channel/reconcile.go +++ b/pkg/provisioners/kafka/controller/channel/reconcile.go @@ -104,10 +104,17 @@ func (r *reconciler) reconcile(channel *v1alpha1.Channel) error { r.logger.Info("failed to get metadata", zap.Error(err)) return err } + + kafkaClusterAdmin, err := createKafkaAdminClient(r.config) + if err != nil { + r.logger.Fatal("unable to build kafka admin client", zap.Error(err)) + return err + } + deletionTimestamp := accessor.GetDeletionTimestamp() if deletionTimestamp != nil { r.logger.Info(fmt.Sprintf("DeletionTimestamp: %v", deletionTimestamp)) - if err := r.deprovisionChannel(channel); err != nil { + if err := r.deprovisionChannel(channel, kafkaClusterAdmin); err != nil { return err } r.removeFinalizer(channel) @@ -116,15 +123,19 @@ func (r *reconciler) reconcile(channel *v1alpha1.Channel) error { r.addFinalizer(channel) - if err := r.provisionChannel(channel); err != nil { + if err := r.provisionChannel(channel, 
kafkaClusterAdmin); err != nil { channel.Status.MarkNotProvisioned("NotProvisioned", "error while provisioning: %s", err) return err } channel.Status.MarkProvisioned() + + // close the connection + kafkaClusterAdmin.Close(); + return nil } -func (r *reconciler) provisionChannel(channel *v1alpha1.Channel) error { +func (r *reconciler) provisionChannel(channel *v1alpha1.Channel, kafkaClusterAdmin sarama.ClusterAdmin) error { topicName := topicName(channel) r.logger.Info("creating topic on kafka cluster", zap.String("topic", topicName)) @@ -145,7 +156,7 @@ func (r *reconciler) provisionChannel(channel *v1alpha1.Channel) error { } } - err := r.kafkaClusterAdmin.CreateTopic(topicName, &sarama.TopicDetail{ + err := kafkaClusterAdmin.CreateTopic(topicName, &sarama.TopicDetail{ ReplicationFactor: 1, NumPartitions: int32(partitions), }, false) @@ -159,11 +170,11 @@ func (r *reconciler) provisionChannel(channel *v1alpha1.Channel) error { return err } -func (r *reconciler) deprovisionChannel(channel *v1alpha1.Channel) error { +func (r *reconciler) deprovisionChannel(channel *v1alpha1.Channel, kafkaClusterAdmin sarama.ClusterAdmin) error { topicName := topicName(channel) r.logger.Info("deleting topic on kafka cluster", zap.String("topic", topicName)) - err := r.kafkaClusterAdmin.DeleteTopic(topicName) + err := kafkaClusterAdmin.DeleteTopic(topicName) if err == sarama.ErrUnknownTopicOrPartition { return nil } else if err != nil { @@ -221,6 +232,13 @@ func (r *reconciler) removeFinalizer(channel *v1alpha1.Channel) { channel.Finalizers = finalizers.List() } +func createKafkaAdminClient(config *controller.KafkaProvisionerConfig) (sarama.ClusterAdmin, error) { + saramaConf := sarama.NewConfig() + saramaConf.Version = sarama.V1_1_0_0 + saramaConf.ClientID = controllerAgentName + return sarama.NewClusterAdmin(config.Brokers, saramaConf) +} + func topicName(channel *v1alpha1.Channel) string { return fmt.Sprintf("%s.%s", channel.Namespace, channel.Name) } diff --git 
a/vendor/github.com/Shopify/sarama/admin.go b/vendor/github.com/Shopify/sarama/admin.go index 68284641c82..52725758d21 100644 --- a/vendor/github.com/Shopify/sarama/admin.go +++ b/vendor/github.com/Shopify/sarama/admin.go @@ -118,6 +118,7 @@ func (ca *clusterAdmin) CreateTopic(topic string, detail *TopicDetail, validateO request := &CreateTopicsRequest{ TopicDetails: topicDetails, ValidateOnly: validateOnly, + Timeout: ca.conf.Admin.Timeout, } if ca.conf.Version.IsAtLeast(V0_11_0_0) { @@ -155,7 +156,10 @@ func (ca *clusterAdmin) DeleteTopic(topic string) error { return ErrInvalidTopic } - request := &DeleteTopicsRequest{Topics: []string{topic}} + request := &DeleteTopicsRequest{ + Topics: []string{topic}, + Timeout: ca.conf.Admin.Timeout, + } if ca.conf.Version.IsAtLeast(V0_11_0_0) { request.Version = 1 @@ -192,6 +196,7 @@ func (ca *clusterAdmin) CreatePartitions(topic string, count int32, assignment [ request := &CreatePartitionsRequest{ TopicPartitions: topicPartitions, + Timeout: ca.conf.Admin.Timeout, } b, err := ca.Controller() @@ -225,7 +230,9 @@ func (ca *clusterAdmin) DeleteRecords(topic string, partitionOffsets map[int32]i topics := make(map[string]*DeleteRecordsRequestTopic) topics[topic] = &DeleteRecordsRequestTopic{PartitionOffsets: partitionOffsets} request := &DeleteRecordsRequest{ - Topics: topics} + Topics: topics, + Timeout: ca.conf.Admin.Timeout, + } b, err := ca.Controller() if err != nil { diff --git a/vendor/github.com/Shopify/sarama/balance_strategy.go b/vendor/github.com/Shopify/sarama/balance_strategy.go new file mode 100644 index 00000000000..e78988d7181 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/balance_strategy.go @@ -0,0 +1,129 @@ +package sarama + +import ( + "math" + "sort" +) + +// BalanceStrategyPlan is the results of any BalanceStrategy.Plan attempt. +// It contains an allocation of topic/partitions by memberID in the form of +// a `memberID -> topic -> partitions` map. 
+type BalanceStrategyPlan map[string]map[string][]int32 + +// Add assigns a topic with a number partitions to a member. +func (p BalanceStrategyPlan) Add(memberID, topic string, partitions ...int32) { + if len(partitions) == 0 { + return + } + if _, ok := p[memberID]; !ok { + p[memberID] = make(map[string][]int32, 1) + } + p[memberID][topic] = append(p[memberID][topic], partitions...) +} + +// -------------------------------------------------------------------- + +// BalanceStrategy is used to balance topics and partitions +// across memebers of a consumer group +type BalanceStrategy interface { + // Name uniquely identifies the strategy. + Name() string + + // Plan accepts a map of `memberID -> metadata` and a map of `topic -> partitions` + // and returns a distribution plan. + Plan(members map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error) +} + +// -------------------------------------------------------------------- + +// BalanceStrategyRange is the default and assigns partitions as ranges to consumer group members. +// Example with one topic T with six partitions (0..5) and two members (M1, M2): +// M1: {T: [0, 1, 2]} +// M2: {T: [3, 4, 5]} +var BalanceStrategyRange = &balanceStrategy{ + name: "range", + coreFn: func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32) { + step := float64(len(partitions)) / float64(len(memberIDs)) + + for i, memberID := range memberIDs { + pos := float64(i) + min := int(math.Floor(pos*step + 0.5)) + max := int(math.Floor((pos+1)*step + 0.5)) + plan.Add(memberID, topic, partitions[min:max]...) + } + }, +} + +// BalanceStrategyRoundRobin assigns partitions to members in alternating order. 
+// Example with topic T with six partitions (0..5) and two members (M1, M2): +// M1: {T: [0, 2, 4]} +// M2: {T: [1, 3, 5]} +var BalanceStrategyRoundRobin = &balanceStrategy{ + name: "roundrobin", + coreFn: func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32) { + for i, part := range partitions { + memberID := memberIDs[i%len(memberIDs)] + plan.Add(memberID, topic, part) + } + }, +} + +// -------------------------------------------------------------------- + +type balanceStrategy struct { + name string + coreFn func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32) +} + +// Name implements BalanceStrategy. +func (s *balanceStrategy) Name() string { return s.name } + +// Balance implements BalanceStrategy. +func (s *balanceStrategy) Plan(members map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error) { + // Build members by topic map + mbt := make(map[string][]string) + for memberID, meta := range members { + for _, topic := range meta.Topics { + mbt[topic] = append(mbt[topic], memberID) + } + } + + // Sort members for each topic + for topic, memberIDs := range mbt { + sort.Sort(&balanceStrategySortable{ + topic: topic, + memberIDs: memberIDs, + }) + } + + // Assemble plan + plan := make(BalanceStrategyPlan, len(members)) + for topic, memberIDs := range mbt { + s.coreFn(plan, memberIDs, topic, topics[topic]) + } + return plan, nil +} + +type balanceStrategySortable struct { + topic string + memberIDs []string +} + +func (p balanceStrategySortable) Len() int { return len(p.memberIDs) } +func (p balanceStrategySortable) Swap(i, j int) { + p.memberIDs[i], p.memberIDs[j] = p.memberIDs[j], p.memberIDs[i] +} +func (p balanceStrategySortable) Less(i, j int) bool { + return balanceStrategyHashValue(p.topic, p.memberIDs[i]) < balanceStrategyHashValue(p.topic, p.memberIDs[j]) +} + +func balanceStrategyHashValue(vv ...string) uint32 { + h := uint32(2166136261) + for _, s := 
range vv { + for _, c := range s { + h ^= uint32(c) + h *= 16777619 + } + } + return h +} diff --git a/vendor/github.com/Shopify/sarama/broker.go b/vendor/github.com/Shopify/sarama/broker.go index 6430fd999fa..26f63d51d6d 100644 --- a/vendor/github.com/Shopify/sarama/broker.go +++ b/vendor/github.com/Shopify/sarama/broker.go @@ -86,6 +86,7 @@ func (b *Broker) Open(conf *Config) error { dialer := net.Dialer{ Timeout: conf.Net.DialTimeout, KeepAlive: conf.Net.KeepAlive, + LocalAddr: conf.Net.LocalAddr, } if conf.Net.TLS.Enable { diff --git a/vendor/github.com/Shopify/sarama/client.go b/vendor/github.com/Shopify/sarama/client.go index 019cb43735a..ad805346b4b 100644 --- a/vendor/github.com/Shopify/sarama/client.go +++ b/vendor/github.com/Shopify/sarama/client.go @@ -17,7 +17,7 @@ type Client interface { // altered after it has been created. Config() *Config - // Controller returns the cluster controller broker. + // Controller returns the cluster controller broker. Requires Kafka 0.10 or higher. Controller() (*Broker, error) // Brokers returns the current set of active brokers as retrieved from cluster metadata. @@ -100,10 +100,11 @@ type client struct { seedBrokers []*Broker deadSeeds []*Broker - controllerID int32 // cluster controller broker id - brokers map[int32]*Broker // maps broker ids to brokers - metadata map[string]map[int32]*PartitionMetadata // maps topics to partition ids to metadata - coordinators map[string]int32 // Maps consumer group names to coordinating broker IDs + controllerID int32 // cluster controller broker id + brokers map[int32]*Broker // maps broker ids to brokers + metadata map[string]map[int32]*PartitionMetadata // maps topics to partition ids to metadata + metadataTopics map[string]none // topics that need to collect metadata + coordinators map[string]int32 // Maps consumer group names to coordinating broker IDs // If the number of partitions is large, we can get some churn calling cachedPartitions, // so the result is cached. 
It is important to update this value whenever metadata is changed @@ -136,6 +137,7 @@ func NewClient(addrs []string, conf *Config) (Client, error) { closed: make(chan none), brokers: make(map[int32]*Broker), metadata: make(map[string]map[int32]*PartitionMetadata), + metadataTopics: make(map[string]none), cachedPartitionsResults: make(map[string][maxPartitionIndex][]int32), coordinators: make(map[string]int32), } @@ -207,6 +209,7 @@ func (client *client) Close() error { client.brokers = nil client.metadata = nil + client.metadataTopics = nil return nil } @@ -231,6 +234,22 @@ func (client *client) Topics() ([]string, error) { return ret, nil } +func (client *client) MetadataTopics() ([]string, error) { + if client.Closed() { + return nil, ErrClosedClient + } + + client.lock.RLock() + defer client.lock.RUnlock() + + ret := make([]string, 0, len(client.metadataTopics)) + for topic := range client.metadataTopics { + ret = append(ret, topic) + } + + return ret, nil +} + func (client *client) Partitions(topic string) ([]int32, error) { if client.Closed() { return nil, ErrClosedClient @@ -388,6 +407,10 @@ func (client *client) Controller() (*Broker, error) { return nil, ErrClosedClient } + if !client.conf.Version.IsAtLeast(V0_10_0_0) { + return nil, ErrUnsupportedVersion + } + controller := client.cachedController() if controller == nil { if err := client.refreshMetadata(); err != nil { @@ -645,7 +668,7 @@ func (client *client) refreshMetadata() error { topics := []string{} if !client.conf.Metadata.Full { - if specificTopics, err := client.Topics(); err != nil { + if specificTopics, err := client.MetadataTopics(); err != nil { return err } else if len(specificTopics) == 0 { return ErrNoTopicsToUpdateMetadata @@ -728,9 +751,16 @@ func (client *client) updateMetadata(data *MetadataResponse, allKnownMetaData bo if allKnownMetaData { client.metadata = make(map[string]map[int32]*PartitionMetadata) + client.metadataTopics = make(map[string]none) client.cachedPartitionsResults = 
make(map[string][maxPartitionIndex][]int32) } for _, topic := range data.Topics { + // topics must be added firstly to `metadataTopics` to guarantee that all + // requested topics must be recorded to keep them trackable for periodically + // metadata refresh. + if _, exists := client.metadataTopics[topic.Name]; !exists { + client.metadataTopics[topic.Name] = none{} + } delete(client.metadata, topic.Name) delete(client.cachedPartitionsResults, topic.Name) diff --git a/vendor/github.com/Shopify/sarama/config.go b/vendor/github.com/Shopify/sarama/config.go index a564b5c23e4..faf11e83839 100644 --- a/vendor/github.com/Shopify/sarama/config.go +++ b/vendor/github.com/Shopify/sarama/config.go @@ -5,6 +5,7 @@ import ( "crypto/tls" "fmt" "io/ioutil" + "net" "regexp" "time" @@ -17,6 +18,13 @@ var validID = regexp.MustCompile(`\A[A-Za-z0-9._-]+\z`) // Config is used to pass multiple configuration options to Sarama's constructors. type Config struct { + // Admin is the namespace for ClusterAdmin properties used by the administrative Kafka client. + Admin struct { + // The maximum duration the administrative Kafka client will wait for ClusterAdmin operations, + // including topics, brokers, configurations and ACLs (defaults to 3 seconds). + Timeout time.Duration + } + // Net is the namespace for network-level properties used by the Broker, and // shared by the Client/Producer/Consumer. Net struct { @@ -58,6 +66,12 @@ type Config struct { // KeepAlive specifies the keep-alive period for an active network connection. // If zero, keep-alives are disabled. (default is 0: disabled). KeepAlive time.Duration + + // LocalAddr is the local address to use when dialing an + // address. The address must be of a compatible type for the + // network being dialed. + // If nil, a local address is automatically chosen. 
+ LocalAddr net.Addr } // Metadata is the namespace for metadata management properties used by the @@ -159,14 +173,55 @@ type Config struct { // Consumer is the namespace for configuration related to consuming messages, // used by the Consumer. - // - // Note that Sarama's Consumer type does not currently support automatic - // consumer-group rebalancing and offset tracking. For Zookeeper-based - // tracking (Kafka 0.8.2 and earlier), the https://github.com/wvanbergen/kafka - // library builds on Sarama to add this support. For Kafka-based tracking - // (Kafka 0.9 and later), the https://github.com/bsm/sarama-cluster library - // builds on Sarama to add this support. Consumer struct { + + // Group is the namespace for configuring consumer group. + Group struct { + Session struct { + // The timeout used to detect consumer failures when using Kafka's group management facility. + // The consumer sends periodic heartbeats to indicate its liveness to the broker. + // If no heartbeats are received by the broker before the expiration of this session timeout, + // then the broker will remove this consumer from the group and initiate a rebalance. + // Note that the value must be in the allowable range as configured in the broker configuration + // by `group.min.session.timeout.ms` and `group.max.session.timeout.ms` (default 10s) + Timeout time.Duration + } + Heartbeat struct { + // The expected time between heartbeats to the consumer coordinator when using Kafka's group + // management facilities. Heartbeats are used to ensure that the consumer's session stays active and + // to facilitate rebalancing when new consumers join or leave the group. + // The value must be set lower than Consumer.Group.Session.Timeout, but typically should be set no + // higher than 1/3 of that value. 
+ // It can be adjusted even lower to control the expected time for normal rebalances (default 3s) + Interval time.Duration + } + Rebalance struct { + // Strategy for allocating topic partitions to members (default BalanceStrategyRange) + Strategy BalanceStrategy + // The maximum allowed time for each worker to join the group once a rebalance has begun. + // This is basically a limit on the amount of time needed for all tasks to flush any pending + // data and commit offsets. If the timeout is exceeded, then the worker will be removed from + // the group, which will cause offset commit failures (default 60s). + Timeout time.Duration + + Retry struct { + // When a new consumer joins a consumer group the set of consumers attempt to "rebalance" + // the load to assign partitions to each consumer. If the set of consumers changes while + // this assignment is taking place the rebalance will fail and retry. This setting controls + // the maximum number of attempts before giving up (default 4). + Max int + // Backoff time between retries during rebalance (default 2s) + Backoff time.Duration + } + } + Member struct { + // Custom metadata to include when joining the group. The user data for all joined members + // can be retrieved by sending a DescribeGroupRequest to the broker that is the + // coordinator for the group. + UserData []byte + } + } + Retry struct { // How long to wait after a failing to read from a partition before // trying again (default 2s). @@ -248,6 +303,12 @@ type Config struct { // broker version 0.9.0 or later. // (default is 0: disabled). Retention time.Duration + + Retry struct { + // The total number of times to retry failing commit + // requests during OffsetManager shutdown (default 3). 
+ Max int + } } } @@ -279,6 +340,8 @@ type Config struct { func NewConfig() *Config { c := &Config{} + c.Admin.Timeout = 3 * time.Second + c.Net.MaxOpenRequests = 5 c.Net.DialTimeout = 30 * time.Second c.Net.ReadTimeout = 30 * time.Second @@ -307,6 +370,14 @@ func NewConfig() *Config { c.Consumer.Return.Errors = false c.Consumer.Offsets.CommitInterval = 1 * time.Second c.Consumer.Offsets.Initial = OffsetNewest + c.Consumer.Offsets.Retry.Max = 3 + + c.Consumer.Group.Session.Timeout = 10 * time.Second + c.Consumer.Group.Heartbeat.Interval = 3 * time.Second + c.Consumer.Group.Rebalance.Strategy = BalanceStrategyRange + c.Consumer.Group.Rebalance.Timeout = 60 * time.Second + c.Consumer.Group.Rebalance.Retry.Max = 4 + c.Consumer.Group.Rebalance.Retry.Backoff = 2 * time.Second c.ClientID = defaultClientID c.ChannelBufferSize = 256 @@ -355,6 +426,15 @@ func (c *Config) Validate() error { if c.Consumer.Offsets.Retention%time.Millisecond != 0 { Logger.Println("Consumer.Offsets.Retention only supports millisecond precision; nanoseconds will be truncated.") } + if c.Consumer.Group.Session.Timeout%time.Millisecond != 0 { + Logger.Println("Consumer.Group.Session.Timeout only supports millisecond precision; nanoseconds will be truncated.") + } + if c.Consumer.Group.Heartbeat.Interval%time.Millisecond != 0 { + Logger.Println("Consumer.Group.Heartbeat.Interval only supports millisecond precision; nanoseconds will be truncated.") + } + if c.Consumer.Group.Rebalance.Timeout%time.Millisecond != 0 { + Logger.Println("Consumer.Group.Rebalance.Timeout only supports millisecond precision; nanoseconds will be truncated.") + } if c.ClientID == defaultClientID { Logger.Println("ClientID is the default of 'sarama', you should consider setting it to something application-specific.") } @@ -377,6 +457,12 @@ func (c *Config) Validate() error { return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled") } + // validate the Admin values + switch { + case c.Admin.Timeout 
<= 0: + return ConfigurationError("Admin.Timeout must be > 0") + } + // validate the Metadata values switch { case c.Metadata.Retry.Max < 0: @@ -443,7 +529,26 @@ func (c *Config) Validate() error { return ConfigurationError("Consumer.Offsets.CommitInterval must be > 0") case c.Consumer.Offsets.Initial != OffsetOldest && c.Consumer.Offsets.Initial != OffsetNewest: return ConfigurationError("Consumer.Offsets.Initial must be OffsetOldest or OffsetNewest") + case c.Consumer.Offsets.Retry.Max < 0: + return ConfigurationError("Consumer.Offsets.Retry.Max must be >= 0") + } + // validate the Consumer Group values + switch { + case c.Consumer.Group.Session.Timeout <= 2*time.Millisecond: + return ConfigurationError("Consumer.Group.Session.Timeout must be >= 2ms") + case c.Consumer.Group.Heartbeat.Interval < 1*time.Millisecond: + return ConfigurationError("Consumer.Group.Heartbeat.Interval must be >= 1ms") + case c.Consumer.Group.Heartbeat.Interval >= c.Consumer.Group.Session.Timeout: + return ConfigurationError("Consumer.Group.Heartbeat.Interval must be < Consumer.Group.Session.Timeout") + case c.Consumer.Group.Rebalance.Strategy == nil: + return ConfigurationError("Consumer.Group.Rebalance.Strategy must not be empty") + case c.Consumer.Group.Rebalance.Timeout <= time.Millisecond: + return ConfigurationError("Consumer.Group.Rebalance.Timeout must be >= 1ms") + case c.Consumer.Group.Rebalance.Retry.Max < 0: + return ConfigurationError("Consumer.Group.Rebalance.Retry.Max must be >= 0") + case c.Consumer.Group.Rebalance.Retry.Backoff < 0: + return ConfigurationError("Consumer.Group.Rebalance.Retry.Backoff must be >= 0") } // validate misc shared values diff --git a/vendor/github.com/Shopify/sarama/consumer_group.go b/vendor/github.com/Shopify/sarama/consumer_group.go new file mode 100644 index 00000000000..33a231477f9 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/consumer_group.go @@ -0,0 +1,774 @@ +package sarama + +import ( + "context" + "errors" + "fmt" + "sort" + 
"sync" + "time" +) + +// ErrClosedConsumerGroup is the error returned when a method is called on a consumer group that has been closed. +var ErrClosedConsumerGroup = errors.New("kafka: tried to use a consumer group that was closed") + +// ConsumerGroup is responsible for dividing up processing of topics and partitions +// over a collection of processes (the members of the consumer group). +type ConsumerGroup interface { + // Consume joins a cluster of consumers for a given list of topics and + // starts a blocking ConsumerGroupSession through the ConsumerGroupHandler. + // + // The life-cycle of a session is represented by the following steps: + // + // 1. The consumers join the group (as explained in https://kafka.apache.org/documentation/#intro_consumers) + // and is assigned their "fair share" of partitions, aka 'claims'. + // 2. Before processing starts, the handler's Setup() hook is called to notify the user + // of the claims and allow any necessary preparation or alteration of state. + // 3. For each of the assigned claims the handler's ConsumeClaim() function is then called + // in a separate goroutine which requires it to be thread-safe. Any state must be carefully protected + // from concurrent reads/writes. + // 4. The session will persist until one of the ConsumeClaim() functions exits. This can be either when the + // parent context is cancelled or when a server-side rebalance cycle is initiated. + // 5. Once all the ConsumeClaim() loops have exited, the handler's Cleanup() hook is called + // to allow the user to perform any final tasks before a rebalance. + // 6. Finally, marked offsets are committed one last time before claims are released. + // + // Please note, that once a relance is triggered, sessions must be completed within + // Config.Consumer.Group.Rebalance.Timeout. This means that ConsumeClaim() functions must exit + // as quickly as possible to allow time for Cleanup() and the final offset commit. 
If the timeout + // is exceeded, the consumer will be removed from the group by Kafka, which will cause offset + // commit failures. + Consume(ctx context.Context, topics []string, handler ConsumerGroupHandler) error + + // Errors returns a read channel of errors that occurred during the consumer life-cycle. + // By default, errors are logged and not returned over this channel. + // If you want to implement any custom error handling, set your config's + // Consumer.Return.Errors setting to true, and read from this channel. + Errors() <-chan error + + // Close stops the ConsumerGroup and detaches any running sessions. It is required to call + // this function before the object passes out of scope, as it will otherwise leak memory. + Close() error +} + +type consumerGroup struct { + client Client + ownClient bool + + config *Config + consumer Consumer + groupID string + memberID string + errors chan error + + lock sync.Mutex + closed chan none + closeOnce sync.Once +} + +// NewConsumerGroup creates a new consumer group the given broker addresses and configuration. +func NewConsumerGroup(addrs []string, groupID string, config *Config) (ConsumerGroup, error) { + client, err := NewClient(addrs, config) + if err != nil { + return nil, err + } + + c, err := NewConsumerGroupFromClient(groupID, client) + if err != nil { + _ = client.Close() + return nil, err + } + + c.(*consumerGroup).ownClient = true + return c, nil +} + +// NewConsumerFromClient creates a new consumer group using the given client. It is still +// necessary to call Close() on the underlying client when shutting down this consumer. +// PLEASE NOTE: consumer groups can only re-use but not share clients. 
+func NewConsumerGroupFromClient(groupID string, client Client) (ConsumerGroup, error) { + config := client.Config() + if !config.Version.IsAtLeast(V0_10_2_0) { + return nil, ConfigurationError("consumer groups require Version to be >= V0_10_2_0") + } + + consumer, err := NewConsumerFromClient(client) + if err != nil { + return nil, err + } + + return &consumerGroup{ + client: client, + consumer: consumer, + config: config, + groupID: groupID, + errors: make(chan error, config.ChannelBufferSize), + closed: make(chan none), + }, nil +} + +// Errors implements ConsumerGroup. +func (c *consumerGroup) Errors() <-chan error { return c.errors } + +// Close implements ConsumerGroup. +func (c *consumerGroup) Close() (err error) { + c.closeOnce.Do(func() { + close(c.closed) + + c.lock.Lock() + defer c.lock.Unlock() + + // leave group + if e := c.leave(); e != nil { + err = e + } + + // drain errors + go func() { + close(c.errors) + }() + for e := range c.errors { + err = e + } + + if c.ownClient { + if e := c.client.Close(); e != nil { + err = e + } + } + }) + return +} + +// Consume implements ConsumerGroup. 
+func (c *consumerGroup) Consume(ctx context.Context, topics []string, handler ConsumerGroupHandler) error { + // Ensure group is not closed + select { + case <-c.closed: + return ErrClosedConsumerGroup + default: + } + + c.lock.Lock() + defer c.lock.Unlock() + + // Quick exit when no topics are provided + if len(topics) == 0 { + return fmt.Errorf("no topics provided") + } + + // Refresh metadata for requested topics + if err := c.client.RefreshMetadata(topics...); err != nil { + return err + } + + // Get coordinator + coordinator, err := c.client.Coordinator(c.groupID) + if err != nil { + return err + } + + // Init session + sess, err := c.newSession(ctx, coordinator, topics, handler, c.config.Consumer.Group.Rebalance.Retry.Max) + if err == ErrClosedClient { + return ErrClosedConsumerGroup + } else if err != nil { + return err + } + + // Wait for session exit signal + <-sess.ctx.Done() + + // Gracefully release session claims + return sess.release(true) +} + +func (c *consumerGroup) newSession(ctx context.Context, coordinator *Broker, topics []string, handler ConsumerGroupHandler, retries int) (*consumerGroupSession, error) { + // Join consumer group + join, err := c.joinGroupRequest(coordinator, topics) + if err != nil { + _ = coordinator.Close() + return nil, err + } + switch join.Err { + case ErrNoError: + c.memberID = join.MemberId + case ErrUnknownMemberId, ErrIllegalGeneration: // reset member ID and retry immediately + c.memberID = "" + return c.newSession(ctx, coordinator, topics, handler, retries) + case ErrRebalanceInProgress: // retry after backoff + if retries <= 0 { + return nil, join.Err + } + + select { + case <-c.closed: + return nil, ErrClosedConsumerGroup + case <-time.After(c.config.Consumer.Group.Rebalance.Retry.Backoff): + } + + return c.newSession(ctx, coordinator, topics, handler, retries-1) + default: + return nil, join.Err + } + + // Prepare distribution plan if we joined as the leader + var plan BalanceStrategyPlan + if join.LeaderId == 
join.MemberId { + members, err := join.GetMembers() + if err != nil { + return nil, err + } + + plan, err = c.balance(members) + if err != nil { + return nil, err + } + } + + // Sync consumer group + sync, err := c.syncGroupRequest(coordinator, plan, join.GenerationId) + if err != nil { + _ = coordinator.Close() + return nil, err + } + switch sync.Err { + case ErrNoError: + case ErrUnknownMemberId, ErrIllegalGeneration: // reset member ID and retry immediately + c.memberID = "" + return c.newSession(ctx, coordinator, topics, handler, retries) + case ErrRebalanceInProgress: // retry after backoff + if retries <= 0 { + return nil, sync.Err + } + + select { + case <-c.closed: + return nil, ErrClosedConsumerGroup + case <-time.After(c.config.Consumer.Group.Rebalance.Retry.Backoff): + } + + return c.newSession(ctx, coordinator, topics, handler, retries-1) + default: + return nil, sync.Err + } + + // Retrieve and sort claims + var claims map[string][]int32 + if len(sync.MemberAssignment) > 0 { + members, err := sync.GetMemberAssignment() + if err != nil { + return nil, err + } + claims = members.Topics + + for _, partitions := range claims { + sort.Sort(int32Slice(partitions)) + } + } + + return newConsumerGroupSession(c, ctx, claims, join.MemberId, join.GenerationId, handler) +} + +func (c *consumerGroup) joinGroupRequest(coordinator *Broker, topics []string) (*JoinGroupResponse, error) { + req := &JoinGroupRequest{ + GroupId: c.groupID, + MemberId: c.memberID, + SessionTimeout: int32(c.config.Consumer.Group.Session.Timeout / time.Millisecond), + ProtocolType: "consumer", + } + if c.config.Version.IsAtLeast(V0_10_1_0) { + req.Version = 1 + req.RebalanceTimeout = int32(c.config.Consumer.Group.Rebalance.Timeout / time.Millisecond) + } + + meta := &ConsumerGroupMemberMetadata{ + Topics: topics, + UserData: c.config.Consumer.Group.Member.UserData, + } + strategy := c.config.Consumer.Group.Rebalance.Strategy + if err := req.AddGroupProtocolMetadata(strategy.Name(), meta); 
err != nil { + return nil, err + } + + return coordinator.JoinGroup(req) +} + +func (c *consumerGroup) syncGroupRequest(coordinator *Broker, plan BalanceStrategyPlan, generationID int32) (*SyncGroupResponse, error) { + req := &SyncGroupRequest{ + GroupId: c.groupID, + MemberId: c.memberID, + GenerationId: generationID, + } + for memberID, topics := range plan { + err := req.AddGroupAssignmentMember(memberID, &ConsumerGroupMemberAssignment{ + Topics: topics, + }) + if err != nil { + return nil, err + } + } + return coordinator.SyncGroup(req) +} + +func (c *consumerGroup) heartbeatRequest(coordinator *Broker, memberID string, generationID int32) (*HeartbeatResponse, error) { + req := &HeartbeatRequest{ + GroupId: c.groupID, + MemberId: memberID, + GenerationId: generationID, + } + + return coordinator.Heartbeat(req) +} + +func (c *consumerGroup) balance(members map[string]ConsumerGroupMemberMetadata) (BalanceStrategyPlan, error) { + topics := make(map[string][]int32) + for _, meta := range members { + for _, topic := range meta.Topics { + topics[topic] = nil + } + } + + for topic := range topics { + partitions, err := c.client.Partitions(topic) + if err != nil { + return nil, err + } + topics[topic] = partitions + } + + strategy := c.config.Consumer.Group.Rebalance.Strategy + return strategy.Plan(members, topics) +} + +// Leaves the cluster, called by Close, protected by lock. 
+func (c *consumerGroup) leave() error { + if c.memberID == "" { + return nil + } + + coordinator, err := c.client.Coordinator(c.groupID) + if err != nil { + return err + } + + resp, err := coordinator.LeaveGroup(&LeaveGroupRequest{ + GroupId: c.groupID, + MemberId: c.memberID, + }) + if err != nil { + _ = coordinator.Close() + return err + } + + // Unset memberID + c.memberID = "" + + // Check response + switch resp.Err { + case ErrRebalanceInProgress, ErrUnknownMemberId, ErrNoError: + return nil + default: + return resp.Err + } +} + +func (c *consumerGroup) handleError(err error, topic string, partition int32) { + select { + case <-c.closed: + return + default: + } + + if _, ok := err.(*ConsumerError); !ok && topic != "" && partition > -1 { + err = &ConsumerError{ + Topic: topic, + Partition: partition, + Err: err, + } + } + + if c.config.Consumer.Return.Errors { + select { + case c.errors <- err: + default: + } + } else { + Logger.Println(err) + } +} + +// -------------------------------------------------------------------- + +// ConsumerGroupSession represents a consumer group member session. +type ConsumerGroupSession interface { + // Claims returns information about the claimed partitions by topic. + Claims() map[string][]int32 + + // MemberID returns the cluster member ID. + MemberID() string + + // GenerationID returns the current generation ID. + GenerationID() int32 + + // MarkOffset marks the provided offset, alongside a metadata string + // that represents the state of the partition consumer at that point in time. The + // metadata string can be used by another consumer to restore that state, so it + // can resume consumption. + // + // To follow upstream conventions, you are expected to mark the offset of the + // next message to read, not the last message read. Thus, when calling `MarkOffset` + // you should typically add one to the offset of the last consumed message. 
+ // + // Note: calling MarkOffset does not necessarily commit the offset to the backend + // store immediately for efficiency reasons, and it may never be committed if + // your application crashes. This means that you may end up processing the same + // message twice, and your processing should ideally be idempotent. + MarkOffset(topic string, partition int32, offset int64, metadata string) + + // ResetOffset resets to the provided offset, alongside a metadata string that + // represents the state of the partition consumer at that point in time. Reset + // acts as a counterpart to MarkOffset, the difference being that it allows to + // reset an offset to an earlier or smaller value, where MarkOffset only + // allows incrementing the offset. cf MarkOffset for more details. + ResetOffset(topic string, partition int32, offset int64, metadata string) + + // MarkMessage marks a message as consumed. + MarkMessage(msg *ConsumerMessage, metadata string) + + // Context returns the session context. 
+ Context() context.Context +} + +type consumerGroupSession struct { + parent *consumerGroup + memberID string + generationID int32 + handler ConsumerGroupHandler + + claims map[string][]int32 + offsets *offsetManager + ctx context.Context + cancel func() + + waitGroup sync.WaitGroup + releaseOnce sync.Once + hbDying, hbDead chan none +} + +func newConsumerGroupSession(parent *consumerGroup, ctx context.Context, claims map[string][]int32, memberID string, generationID int32, handler ConsumerGroupHandler) (*consumerGroupSession, error) { + // init offset manager + offsets, err := newOffsetManagerFromClient(parent.groupID, memberID, generationID, parent.client) + if err != nil { + return nil, err + } + + // init context + ctx, cancel := context.WithCancel(ctx) + + // init session + sess := &consumerGroupSession{ + parent: parent, + memberID: memberID, + generationID: generationID, + handler: handler, + offsets: offsets, + claims: claims, + ctx: ctx, + cancel: cancel, + hbDying: make(chan none), + hbDead: make(chan none), + } + + // start heartbeat loop + go sess.heartbeatLoop() + + // create a POM for each claim + for topic, partitions := range claims { + for _, partition := range partitions { + pom, err := offsets.ManagePartition(topic, partition) + if err != nil { + _ = sess.release(false) + return nil, err + } + + // handle POM errors + go func(topic string, partition int32) { + for err := range pom.Errors() { + sess.parent.handleError(err, topic, partition) + } + }(topic, partition) + } + } + + // perform setup + if err := handler.Setup(sess); err != nil { + _ = sess.release(true) + return nil, err + } + + // start consuming + for topic, partitions := range claims { + for _, partition := range partitions { + sess.waitGroup.Add(1) + + go func(topic string, partition int32) { + defer sess.waitGroup.Done() + + // cancel the as session as soon as the first + // goroutine exits + defer sess.cancel() + + // consume a single topic/partition, blocking + 
sess.consume(topic, partition) + }(topic, partition) + } + } + return sess, nil +} + +func (s *consumerGroupSession) Claims() map[string][]int32 { return s.claims } +func (s *consumerGroupSession) MemberID() string { return s.memberID } +func (s *consumerGroupSession) GenerationID() int32 { return s.generationID } + +func (s *consumerGroupSession) MarkOffset(topic string, partition int32, offset int64, metadata string) { + if pom := s.offsets.findPOM(topic, partition); pom != nil { + pom.MarkOffset(offset, metadata) + } +} + +func (s *consumerGroupSession) ResetOffset(topic string, partition int32, offset int64, metadata string) { + if pom := s.offsets.findPOM(topic, partition); pom != nil { + pom.ResetOffset(offset, metadata) + } +} + +func (s *consumerGroupSession) MarkMessage(msg *ConsumerMessage, metadata string) { + s.MarkOffset(msg.Topic, msg.Partition, msg.Offset+1, metadata) +} + +func (s *consumerGroupSession) Context() context.Context { + return s.ctx +} + +func (s *consumerGroupSession) consume(topic string, partition int32) { + // quick exit if rebalance is due + select { + case <-s.ctx.Done(): + return + case <-s.parent.closed: + return + default: + } + + // get next offset + offset := s.parent.config.Consumer.Offsets.Initial + if pom := s.offsets.findPOM(topic, partition); pom != nil { + offset, _ = pom.NextOffset() + } + + // create new claim + claim, err := newConsumerGroupClaim(s, topic, partition, offset) + if err != nil { + s.parent.handleError(err, topic, partition) + return + } + + // handle errors + go func() { + for err := range claim.Errors() { + s.parent.handleError(err, topic, partition) + } + }() + + // trigger close when session is done + go func() { + select { + case <-s.ctx.Done(): + case <-s.parent.closed: + } + claim.AsyncClose() + }() + + // start processing + if err := s.handler.ConsumeClaim(s, claim); err != nil { + s.parent.handleError(err, topic, partition) + } + + // ensure consumer is clased & drained + claim.AsyncClose() + 
for _, err := range claim.waitClosed() { + s.parent.handleError(err, topic, partition) + } +} + +func (s *consumerGroupSession) release(withCleanup bool) (err error) { + // signal release, stop heartbeat + s.cancel() + + // wait for consumers to exit + s.waitGroup.Wait() + + // perform release + s.releaseOnce.Do(func() { + if withCleanup { + if e := s.handler.Cleanup(s); e != nil { + s.parent.handleError(err, "", -1) + err = e + } + } + + if e := s.offsets.Close(); e != nil { + err = e + } + + close(s.hbDying) + <-s.hbDead + }) + + return +} + +func (s *consumerGroupSession) heartbeatLoop() { + defer close(s.hbDead) + defer s.cancel() // trigger the end of the session on exit + + pause := time.NewTicker(s.parent.config.Consumer.Group.Heartbeat.Interval) + defer pause.Stop() + + retries := s.parent.config.Metadata.Retry.Max + for { + coordinator, err := s.parent.client.Coordinator(s.parent.groupID) + if err != nil { + if retries <= 0 { + s.parent.handleError(err, "", -1) + return + } + + select { + case <-s.hbDying: + return + case <-time.After(s.parent.config.Metadata.Retry.Backoff): + retries-- + } + continue + } + + resp, err := s.parent.heartbeatRequest(coordinator, s.memberID, s.generationID) + if err != nil { + _ = coordinator.Close() + retries-- + continue + } + + switch resp.Err { + case ErrNoError: + retries = s.parent.config.Metadata.Retry.Max + case ErrRebalanceInProgress, ErrUnknownMemberId, ErrIllegalGeneration: + return + default: + s.parent.handleError(err, "", -1) + return + } + + select { + case <-pause.C: + case <-s.hbDying: + return + } + } +} + +// -------------------------------------------------------------------- + +// ConsumerGroupHandler instances are used to handle individual topic/partition claims. +// It also provides hooks for your consumer group session life-cycle and allow you to +// trigger logic before or after the consume loop(s). 
+// +// PLEASE NOTE that handlers are likely be called from several goroutines concurrently, +// ensure that all state is safely protected against race conditions. +type ConsumerGroupHandler interface { + // Setup is run at the beginning of a new session, before ConsumeClaim. + Setup(ConsumerGroupSession) error + + // Cleanup is run at the end of a session, once all ConsumeClaim goroutines have exites + // but before the offsets are committed for the very last time. + Cleanup(ConsumerGroupSession) error + + // ConsumeClaim must start a consumer loop of ConsumerGroupClaim's Messages(). + // Once the Messages() channel is closed, the Handler must finish its processing + // loop and exit. + ConsumeClaim(ConsumerGroupSession, ConsumerGroupClaim) error +} + +// ConsumerGroupClaim processes Kafka messages from a given topic and partition within a consumer group. +type ConsumerGroupClaim interface { + // Topic returns the consumed topic name. + Topic() string + + // Partition returns the consumed partition. + Partition() int32 + + // InitialOffset returns the initial offset that was used as a starting point for this claim. + InitialOffset() int64 + + // HighWaterMarkOffset returns the high water mark offset of the partition, + // i.e. the offset that will be used for the next message that will be produced. + // You can use this to determine how far behind the processing is. + HighWaterMarkOffset() int64 + + // Messages returns the read channel for the messages that are returned by + // the broker. The messages channel will be closed when a new rebalance cycle + // is due. You must finish processing and mark offsets within + // Config.Consumer.Group.Session.Timeout before the topic/partition is eventually + // re-assigned to another group member. 
+ Messages() <-chan *ConsumerMessage +} + +type consumerGroupClaim struct { + topic string + partition int32 + offset int64 + PartitionConsumer +} + +func newConsumerGroupClaim(sess *consumerGroupSession, topic string, partition int32, offset int64) (*consumerGroupClaim, error) { + pcm, err := sess.parent.consumer.ConsumePartition(topic, partition, offset) + if err == ErrOffsetOutOfRange { + offset = sess.parent.config.Consumer.Offsets.Initial + pcm, err = sess.parent.consumer.ConsumePartition(topic, partition, offset) + } + if err != nil { + return nil, err + } + + go func() { + for err := range pcm.Errors() { + sess.parent.handleError(err, topic, partition) + } + }() + + return &consumerGroupClaim{ + topic: topic, + partition: partition, + offset: offset, + PartitionConsumer: pcm, + }, nil +} + +func (c *consumerGroupClaim) Topic() string { return c.topic } +func (c *consumerGroupClaim) Partition() int32 { return c.partition } +func (c *consumerGroupClaim) InitialOffset() int64 { return c.offset } + +// Drains messages and errors, ensures the claim is fully closed. 
+func (c *consumerGroupClaim) waitClosed() (errs ConsumerErrors) { + go func() { + for range c.Messages() { + } + }() + + for err := range c.Errors() { + errs = append(errs, err) + } + return +} diff --git a/vendor/github.com/Shopify/sarama/fetch_response.go b/vendor/github.com/Shopify/sarama/fetch_response.go index ae91bb9eb09..dade1c47dac 100644 --- a/vendor/github.com/Shopify/sarama/fetch_response.go +++ b/vendor/github.com/Shopify/sarama/fetch_response.go @@ -104,15 +104,26 @@ func (b *FetchResponseBlock) decode(pd packetDecoder, version int16) (err error) return err } - // If we have at least one full records, we skip incomplete ones - if partial && len(b.RecordsSet) > 0 { - break + n, err := records.numRecords() + if err != nil { + return err } - b.RecordsSet = append(b.RecordsSet, records) + if n > 0 || (partial && len(b.RecordsSet) == 0) { + b.RecordsSet = append(b.RecordsSet, records) + + if b.Records == nil { + b.Records = records + } + } - if b.Records == nil { - b.Records = records + overflow, err := records.isOverflow() + if err != nil { + return err + } + + if partial || overflow { + break } } diff --git a/vendor/github.com/Shopify/sarama/length_field.go b/vendor/github.com/Shopify/sarama/length_field.go index 576b1a6f6f8..da199a70a0e 100644 --- a/vendor/github.com/Shopify/sarama/length_field.go +++ b/vendor/github.com/Shopify/sarama/length_field.go @@ -5,6 +5,19 @@ import "encoding/binary" // LengthField implements the PushEncoder and PushDecoder interfaces for calculating 4-byte lengths. 
type lengthField struct { startOffset int + length int32 +} + +func (l *lengthField) decode(pd packetDecoder) error { + var err error + l.length, err = pd.getInt32() + if err != nil { + return err + } + if l.length > int32(pd.remaining()) { + return ErrInsufficientData + } + return nil } func (l *lengthField) saveOffset(in int) { @@ -21,7 +34,7 @@ func (l *lengthField) run(curOffset int, buf []byte) error { } func (l *lengthField) check(curOffset int, buf []byte) error { - if uint32(curOffset-l.startOffset-4) != binary.BigEndian.Uint32(buf[l.startOffset:]) { + if int32(curOffset-l.startOffset-4) != l.length { return PacketDecodingError{"length field invalid"} } diff --git a/vendor/github.com/Shopify/sarama/message_set.go b/vendor/github.com/Shopify/sarama/message_set.go index 27db52fdf1f..600c7c4dfb7 100644 --- a/vendor/github.com/Shopify/sarama/message_set.go +++ b/vendor/github.com/Shopify/sarama/message_set.go @@ -47,6 +47,7 @@ func (msb *MessageBlock) decode(pd packetDecoder) (err error) { type MessageSet struct { PartialTrailingMessage bool // whether the set on the wire contained an incomplete trailing MessageBlock + OverflowMessage bool // whether the set on the wire contained an overflow message Messages []*MessageBlock } @@ -85,7 +86,12 @@ func (ms *MessageSet) decode(pd packetDecoder) (err error) { case ErrInsufficientData: // As an optimization the server is allowed to return a partial message at the // end of the message set. Clients should handle this case. So we just ignore such things. 
- ms.PartialTrailingMessage = true + if msb.Offset == -1 { + // This is an overflow message caused by chunked down conversion + ms.OverflowMessage = true + } else { + ms.PartialTrailingMessage = true + } return nil default: return err diff --git a/vendor/github.com/Shopify/sarama/metadata_request.go b/vendor/github.com/Shopify/sarama/metadata_request.go index 48adfa28cb9..17dc4289a3a 100644 --- a/vendor/github.com/Shopify/sarama/metadata_request.go +++ b/vendor/github.com/Shopify/sarama/metadata_request.go @@ -10,7 +10,7 @@ func (r *MetadataRequest) encode(pe packetEncoder) error { if r.Version < 0 || r.Version > 5 { return PacketEncodingError{"invalid or unsupported MetadataRequest version field"} } - if r.Version == 0 || r.Topics != nil || len(r.Topics) > 0 { + if r.Version == 0 || len(r.Topics) > 0 { err := pe.putArrayLength(len(r.Topics)) if err != nil { return err diff --git a/vendor/github.com/Shopify/sarama/metadata_response.go b/vendor/github.com/Shopify/sarama/metadata_response.go index bf8a67bbc52..c402d05fa37 100644 --- a/vendor/github.com/Shopify/sarama/metadata_response.go +++ b/vendor/github.com/Shopify/sarama/metadata_response.go @@ -207,6 +207,10 @@ func (r *MetadataResponse) decode(pd packetDecoder, version int16) (err error) { } func (r *MetadataResponse) encode(pe packetEncoder) error { + if r.Version >= 3 { + pe.putInt32(r.ThrottleTimeMs) + } + err := pe.putArrayLength(len(r.Brokers)) if err != nil { return err @@ -218,6 +222,13 @@ func (r *MetadataResponse) encode(pe packetEncoder) error { } } + if r.Version >= 2 { + err := pe.putNullableString(r.ClusterID) + if err != nil { + return err + } + } + if r.Version >= 1 { pe.putInt32(r.ControllerID) } diff --git a/vendor/github.com/Shopify/sarama/offset_manager.go b/vendor/github.com/Shopify/sarama/offset_manager.go index 6c01f959e99..8ea857f8351 100644 --- a/vendor/github.com/Shopify/sarama/offset_manager.go +++ b/vendor/github.com/Shopify/sarama/offset_manager.go @@ -25,27 +25,49 @@ type 
offsetManager struct { client Client conf *Config group string + ticker *time.Ticker - lock sync.Mutex - poms map[string]map[int32]*partitionOffsetManager - boms map[*Broker]*brokerOffsetManager + memberID string + generation int32 + + broker *Broker + brokerLock sync.RWMutex + + poms map[string]map[int32]*partitionOffsetManager + pomsLock sync.RWMutex + + closeOnce sync.Once + closing chan none + closed chan none } // NewOffsetManagerFromClient creates a new OffsetManager from the given client. // It is still necessary to call Close() on the underlying client when finished with the partition manager. func NewOffsetManagerFromClient(group string, client Client) (OffsetManager, error) { + return newOffsetManagerFromClient(group, "", GroupGenerationUndefined, client) +} + +func newOffsetManagerFromClient(group, memberID string, generation int32, client Client) (*offsetManager, error) { // Check that we are not dealing with a closed Client before processing any other arguments if client.Closed() { return nil, ErrClosedClient } + conf := client.Config() om := &offsetManager{ client: client, - conf: client.Config(), + conf: conf, group: group, + ticker: time.NewTicker(conf.Consumer.Offsets.CommitInterval), poms: make(map[string]map[int32]*partitionOffsetManager), - boms: make(map[*Broker]*brokerOffsetManager), + + memberID: memberID, + generation: generation, + + closing: make(chan none), + closed: make(chan none), } + go withRecover(om.mainLoop) return om, nil } @@ -56,8 +78,8 @@ func (om *offsetManager) ManagePartition(topic string, partition int32) (Partiti return nil, err } - om.lock.Lock() - defer om.lock.Unlock() + om.pomsLock.Lock() + defer om.pomsLock.Unlock() topicManagers := om.poms[topic] if topicManagers == nil { @@ -74,53 +96,307 @@ func (om *offsetManager) ManagePartition(topic string, partition int32) (Partiti } func (om *offsetManager) Close() error { + om.closeOnce.Do(func() { + // exit the mainLoop + close(om.closing) + <-om.closed + + // mark all POMs 
as closed + om.asyncClosePOMs() + + // flush one last time + for attempt := 0; attempt <= om.conf.Consumer.Offsets.Retry.Max; attempt++ { + om.flushToBroker() + if om.releasePOMs(false) == 0 { + break + } + } + + om.releasePOMs(true) + om.brokerLock.Lock() + om.broker = nil + om.brokerLock.Unlock() + }) return nil } -func (om *offsetManager) refBrokerOffsetManager(broker *Broker) *brokerOffsetManager { - om.lock.Lock() - defer om.lock.Unlock() +func (om *offsetManager) fetchInitialOffset(topic string, partition int32, retries int) (int64, string, error) { + broker, err := om.coordinator() + if err != nil { + if retries <= 0 { + return 0, "", err + } + return om.fetchInitialOffset(topic, partition, retries-1) + } + + req := new(OffsetFetchRequest) + req.Version = 1 + req.ConsumerGroup = om.group + req.AddPartition(topic, partition) + + resp, err := broker.FetchOffset(req) + if err != nil { + if retries <= 0 { + return 0, "", err + } + om.releaseCoordinator(broker) + return om.fetchInitialOffset(topic, partition, retries-1) + } + + block := resp.GetBlock(topic, partition) + if block == nil { + return 0, "", ErrIncompleteResponse + } + + switch block.Err { + case ErrNoError: + return block.Offset, block.Metadata, nil + case ErrNotCoordinatorForConsumer: + if retries <= 0 { + return 0, "", block.Err + } + om.releaseCoordinator(broker) + return om.fetchInitialOffset(topic, partition, retries-1) + case ErrOffsetsLoadInProgress: + if retries <= 0 { + return 0, "", block.Err + } + select { + case <-om.closing: + return 0, "", block.Err + case <-time.After(om.conf.Metadata.Retry.Backoff): + } + return om.fetchInitialOffset(topic, partition, retries-1) + default: + return 0, "", block.Err + } +} + +func (om *offsetManager) coordinator() (*Broker, error) { + om.brokerLock.RLock() + broker := om.broker + om.brokerLock.RUnlock() - bom := om.boms[broker] - if bom == nil { - bom = om.newBrokerOffsetManager(broker) - om.boms[broker] = bom + if broker != nil { + return broker, nil 
} - bom.refs++ + om.brokerLock.Lock() + defer om.brokerLock.Unlock() - return bom + if broker := om.broker; broker != nil { + return broker, nil + } + + if err := om.client.RefreshCoordinator(om.group); err != nil { + return nil, err + } + + broker, err := om.client.Coordinator(om.group) + if err != nil { + return nil, err + } + + om.broker = broker + return broker, nil } -func (om *offsetManager) unrefBrokerOffsetManager(bom *brokerOffsetManager) { - om.lock.Lock() - defer om.lock.Unlock() +func (om *offsetManager) releaseCoordinator(b *Broker) { + om.brokerLock.Lock() + if om.broker == b { + om.broker = nil + } + om.brokerLock.Unlock() +} - bom.refs-- +func (om *offsetManager) mainLoop() { + defer om.ticker.Stop() + defer close(om.closed) - if bom.refs == 0 { - close(bom.updateSubscriptions) - if om.boms[bom.broker] == bom { - delete(om.boms, bom.broker) + for { + select { + case <-om.ticker.C: + om.flushToBroker() + om.releasePOMs(false) + case <-om.closing: + return } } } -func (om *offsetManager) abandonBroker(bom *brokerOffsetManager) { - om.lock.Lock() - defer om.lock.Unlock() +func (om *offsetManager) flushToBroker() { + req := om.constructRequest() + if req == nil { + return + } + + broker, err := om.coordinator() + if err != nil { + om.handleError(err) + return + } + + resp, err := broker.CommitOffset(req) + if err != nil { + om.handleError(err) + om.releaseCoordinator(broker) + _ = broker.Close() + return + } - delete(om.boms, bom.broker) + om.handleResponse(broker, req, resp) } -func (om *offsetManager) abandonPartitionOffsetManager(pom *partitionOffsetManager) { - om.lock.Lock() - defer om.lock.Unlock() +func (om *offsetManager) constructRequest() *OffsetCommitRequest { + var r *OffsetCommitRequest + var perPartitionTimestamp int64 + if om.conf.Consumer.Offsets.Retention == 0 { + perPartitionTimestamp = ReceiveTime + r = &OffsetCommitRequest{ + Version: 1, + ConsumerGroup: om.group, + ConsumerID: om.memberID, + ConsumerGroupGeneration: om.generation, + 
} + } else { + r = &OffsetCommitRequest{ + Version: 2, + RetentionTime: int64(om.conf.Consumer.Offsets.Retention / time.Millisecond), + ConsumerGroup: om.group, + ConsumerID: om.memberID, + ConsumerGroupGeneration: om.generation, + } - delete(om.poms[pom.topic], pom.partition) - if len(om.poms[pom.topic]) == 0 { - delete(om.poms, pom.topic) } + + om.pomsLock.RLock() + defer om.pomsLock.RUnlock() + + for _, topicManagers := range om.poms { + for _, pom := range topicManagers { + pom.lock.Lock() + if pom.dirty { + r.AddBlock(pom.topic, pom.partition, pom.offset, perPartitionTimestamp, pom.metadata) + } + pom.lock.Unlock() + } + } + + if len(r.blocks) > 0 { + return r + } + + return nil +} + +func (om *offsetManager) handleResponse(broker *Broker, req *OffsetCommitRequest, resp *OffsetCommitResponse) { + om.pomsLock.RLock() + defer om.pomsLock.RUnlock() + + for _, topicManagers := range om.poms { + for _, pom := range topicManagers { + if req.blocks[pom.topic] == nil || req.blocks[pom.topic][pom.partition] == nil { + continue + } + + var err KError + var ok bool + + if resp.Errors[pom.topic] == nil { + pom.handleError(ErrIncompleteResponse) + continue + } + if err, ok = resp.Errors[pom.topic][pom.partition]; !ok { + pom.handleError(ErrIncompleteResponse) + continue + } + + switch err { + case ErrNoError: + block := req.blocks[pom.topic][pom.partition] + pom.updateCommitted(block.offset, block.metadata) + case ErrNotLeaderForPartition, ErrLeaderNotAvailable, + ErrConsumerCoordinatorNotAvailable, ErrNotCoordinatorForConsumer: + // not a critical error, we just need to redispatch + om.releaseCoordinator(broker) + case ErrOffsetMetadataTooLarge, ErrInvalidCommitOffsetSize: + // nothing we can do about this, just tell the user and carry on + pom.handleError(err) + case ErrOffsetsLoadInProgress: + // nothing wrong but we didn't commit, we'll get it next time round + break + case ErrUnknownTopicOrPartition: + // let the user know *and* try redispatching - if 
topic-auto-create is + // enabled, redispatching should trigger a metadata req and create the + // topic; if not then re-dispatching won't help, but we've let the user + // know and it shouldn't hurt either (see https://github.com/Shopify/sarama/issues/706) + fallthrough + default: + // dunno, tell the user and try redispatching + pom.handleError(err) + om.releaseCoordinator(broker) + } + } + } +} + +func (om *offsetManager) handleError(err error) { + om.pomsLock.RLock() + defer om.pomsLock.RUnlock() + + for _, topicManagers := range om.poms { + for _, pom := range topicManagers { + pom.handleError(err) + } + } +} + +func (om *offsetManager) asyncClosePOMs() { + om.pomsLock.RLock() + defer om.pomsLock.RUnlock() + + for _, topicManagers := range om.poms { + for _, pom := range topicManagers { + pom.AsyncClose() + } + } +} + +// Releases/removes closed POMs once they are clean (or when forced) +func (om *offsetManager) releasePOMs(force bool) (remaining int) { + om.pomsLock.Lock() + defer om.pomsLock.Unlock() + + for topic, topicManagers := range om.poms { + for partition, pom := range topicManagers { + pom.lock.Lock() + releaseDue := pom.done && (force || !pom.dirty) + pom.lock.Unlock() + + if releaseDue { + pom.release() + + delete(om.poms[topic], partition) + if len(om.poms[topic]) == 0 { + delete(om.poms, topic) + } + } + } + remaining += len(om.poms[topic]) + } + return +} + +func (om *offsetManager) findPOM(topic string, partition int32) *partitionOffsetManager { + om.pomsLock.RLock() + defer om.pomsLock.RUnlock() + + if partitions, ok := om.poms[topic]; ok { + if pom, ok := partitions[partition]; ok { + return pom + } + } + return nil } // Partition Offset Manager @@ -187,138 +463,26 @@ type partitionOffsetManager struct { offset int64 metadata string dirty bool - clean sync.Cond - broker *brokerOffsetManager + done bool - errors chan *ConsumerError - rebalance chan none - dying chan none + releaseOnce sync.Once + errors chan *ConsumerError } func (om 
*offsetManager) newPartitionOffsetManager(topic string, partition int32) (*partitionOffsetManager, error) { - pom := &partitionOffsetManager{ + offset, metadata, err := om.fetchInitialOffset(topic, partition, om.conf.Metadata.Retry.Max) + if err != nil { + return nil, err + } + + return &partitionOffsetManager{ parent: om, topic: topic, partition: partition, errors: make(chan *ConsumerError, om.conf.ChannelBufferSize), - rebalance: make(chan none, 1), - dying: make(chan none), - } - pom.clean.L = &pom.lock - - if err := pom.selectBroker(); err != nil { - return nil, err - } - - if err := pom.fetchInitialOffset(om.conf.Metadata.Retry.Max); err != nil { - return nil, err - } - - pom.broker.updateSubscriptions <- pom - - go withRecover(pom.mainLoop) - - return pom, nil -} - -func (pom *partitionOffsetManager) mainLoop() { - for { - select { - case <-pom.rebalance: - if err := pom.selectBroker(); err != nil { - pom.handleError(err) - pom.rebalance <- none{} - } else { - pom.broker.updateSubscriptions <- pom - } - case <-pom.dying: - if pom.broker != nil { - select { - case <-pom.rebalance: - case pom.broker.updateSubscriptions <- pom: - } - pom.parent.unrefBrokerOffsetManager(pom.broker) - } - pom.parent.abandonPartitionOffsetManager(pom) - close(pom.errors) - return - } - } -} - -func (pom *partitionOffsetManager) selectBroker() error { - if pom.broker != nil { - pom.parent.unrefBrokerOffsetManager(pom.broker) - pom.broker = nil - } - - var broker *Broker - var err error - - if err = pom.parent.client.RefreshCoordinator(pom.parent.group); err != nil { - return err - } - - if broker, err = pom.parent.client.Coordinator(pom.parent.group); err != nil { - return err - } - - pom.broker = pom.parent.refBrokerOffsetManager(broker) - return nil -} - -func (pom *partitionOffsetManager) fetchInitialOffset(retries int) error { - request := new(OffsetFetchRequest) - request.Version = 1 - request.ConsumerGroup = pom.parent.group - request.AddPartition(pom.topic, pom.partition) - - 
response, err := pom.broker.broker.FetchOffset(request) - if err != nil { - return err - } - - block := response.GetBlock(pom.topic, pom.partition) - if block == nil { - return ErrIncompleteResponse - } - - switch block.Err { - case ErrNoError: - pom.offset = block.Offset - pom.metadata = block.Metadata - return nil - case ErrNotCoordinatorForConsumer: - if retries <= 0 { - return block.Err - } - if err := pom.selectBroker(); err != nil { - return err - } - return pom.fetchInitialOffset(retries - 1) - case ErrOffsetsLoadInProgress: - if retries <= 0 { - return block.Err - } - time.Sleep(pom.parent.conf.Metadata.Retry.Backoff) - return pom.fetchInitialOffset(retries - 1) - default: - return block.Err - } -} - -func (pom *partitionOffsetManager) handleError(err error) { - cErr := &ConsumerError{ - Topic: pom.topic, - Partition: pom.partition, - Err: err, - } - - if pom.parent.conf.Consumer.Return.Errors { - pom.errors <- cErr - } else { - Logger.Println(cErr) - } + offset: offset, + metadata: metadata, + }, nil } func (pom *partitionOffsetManager) Errors() <-chan *ConsumerError { @@ -353,7 +517,6 @@ func (pom *partitionOffsetManager) updateCommitted(offset int64, metadata string if pom.offset == offset && pom.metadata == metadata { pom.dirty = false - pom.clean.Signal() } } @@ -369,16 +532,9 @@ func (pom *partitionOffsetManager) NextOffset() (int64, string) { } func (pom *partitionOffsetManager) AsyncClose() { - go func() { - pom.lock.Lock() - defer pom.lock.Unlock() - - for pom.dirty { - pom.clean.Wait() - } - - close(pom.dying) - }() + pom.lock.Lock() + pom.done = true + pom.lock.Unlock() } func (pom *partitionOffsetManager) Close() error { @@ -395,166 +551,22 @@ func (pom *partitionOffsetManager) Close() error { return nil } -// Broker Offset Manager - -type brokerOffsetManager struct { - parent *offsetManager - broker *Broker - timer *time.Ticker - updateSubscriptions chan *partitionOffsetManager - subscriptions map[*partitionOffsetManager]none - refs int -} - 
-func (om *offsetManager) newBrokerOffsetManager(broker *Broker) *brokerOffsetManager { - bom := &brokerOffsetManager{ - parent: om, - broker: broker, - timer: time.NewTicker(om.conf.Consumer.Offsets.CommitInterval), - updateSubscriptions: make(chan *partitionOffsetManager), - subscriptions: make(map[*partitionOffsetManager]none), - } - - go withRecover(bom.mainLoop) - - return bom -} - -func (bom *brokerOffsetManager) mainLoop() { - for { - select { - case <-bom.timer.C: - if len(bom.subscriptions) > 0 { - bom.flushToBroker() - } - case s, ok := <-bom.updateSubscriptions: - if !ok { - bom.timer.Stop() - return - } - if _, ok := bom.subscriptions[s]; ok { - delete(bom.subscriptions, s) - } else { - bom.subscriptions[s] = none{} - } - } - } -} - -func (bom *brokerOffsetManager) flushToBroker() { - request := bom.constructRequest() - if request == nil { - return - } - - response, err := bom.broker.CommitOffset(request) - - if err != nil { - bom.abort(err) - return - } - - for s := range bom.subscriptions { - if request.blocks[s.topic] == nil || request.blocks[s.topic][s.partition] == nil { - continue - } - - var err KError - var ok bool - - if response.Errors[s.topic] == nil { - s.handleError(ErrIncompleteResponse) - delete(bom.subscriptions, s) - s.rebalance <- none{} - continue - } - if err, ok = response.Errors[s.topic][s.partition]; !ok { - s.handleError(ErrIncompleteResponse) - delete(bom.subscriptions, s) - s.rebalance <- none{} - continue - } - - switch err { - case ErrNoError: - block := request.blocks[s.topic][s.partition] - s.updateCommitted(block.offset, block.metadata) - case ErrNotLeaderForPartition, ErrLeaderNotAvailable, - ErrConsumerCoordinatorNotAvailable, ErrNotCoordinatorForConsumer: - // not a critical error, we just need to redispatch - delete(bom.subscriptions, s) - s.rebalance <- none{} - case ErrOffsetMetadataTooLarge, ErrInvalidCommitOffsetSize: - // nothing we can do about this, just tell the user and carry on - s.handleError(err) - case 
ErrOffsetsLoadInProgress: - // nothing wrong but we didn't commit, we'll get it next time round - break - case ErrUnknownTopicOrPartition: - // let the user know *and* try redispatching - if topic-auto-create is - // enabled, redispatching should trigger a metadata request and create the - // topic; if not then re-dispatching won't help, but we've let the user - // know and it shouldn't hurt either (see https://github.com/Shopify/sarama/issues/706) - fallthrough - default: - // dunno, tell the user and try redispatching - s.handleError(err) - delete(bom.subscriptions, s) - s.rebalance <- none{} - } +func (pom *partitionOffsetManager) handleError(err error) { + cErr := &ConsumerError{ + Topic: pom.topic, + Partition: pom.partition, + Err: err, } -} -func (bom *brokerOffsetManager) constructRequest() *OffsetCommitRequest { - var r *OffsetCommitRequest - var perPartitionTimestamp int64 - if bom.parent.conf.Consumer.Offsets.Retention == 0 { - perPartitionTimestamp = ReceiveTime - r = &OffsetCommitRequest{ - Version: 1, - ConsumerGroup: bom.parent.group, - ConsumerGroupGeneration: GroupGenerationUndefined, - } + if pom.parent.conf.Consumer.Return.Errors { + pom.errors <- cErr } else { - r = &OffsetCommitRequest{ - Version: 2, - RetentionTime: int64(bom.parent.conf.Consumer.Offsets.Retention / time.Millisecond), - ConsumerGroup: bom.parent.group, - ConsumerGroupGeneration: GroupGenerationUndefined, - } - - } - - for s := range bom.subscriptions { - s.lock.Lock() - if s.dirty { - r.AddBlock(s.topic, s.partition, s.offset, perPartitionTimestamp, s.metadata) - } - s.lock.Unlock() - } - - if len(r.blocks) > 0 { - return r + Logger.Println(cErr) } - - return nil } -func (bom *brokerOffsetManager) abort(err error) { - _ = bom.broker.Close() // we don't care about the error this might return, we already have one - bom.parent.abandonBroker(bom) - - for pom := range bom.subscriptions { - pom.handleError(err) - pom.rebalance <- none{} - } - - for s := range bom.updateSubscriptions 
{ - if _, ok := bom.subscriptions[s]; !ok { - s.handleError(err) - s.rebalance <- none{} - } - } - - bom.subscriptions = make(map[*partitionOffsetManager]none) +func (pom *partitionOffsetManager) release() { + pom.releaseOnce.Do(func() { + go close(pom.errors) + }) } diff --git a/vendor/github.com/Shopify/sarama/records.go b/vendor/github.com/Shopify/sarama/records.go index 301055bb070..192f5927b21 100644 --- a/vendor/github.com/Shopify/sarama/records.go +++ b/vendor/github.com/Shopify/sarama/records.go @@ -163,6 +163,27 @@ func (r *Records) isControl() (bool, error) { return false, fmt.Errorf("unknown records type: %v", r.recordsType) } +func (r *Records) isOverflow() (bool, error) { + if r.recordsType == unknownRecords { + if empty, err := r.setTypeFromFields(); err != nil || empty { + return false, err + } + } + + switch r.recordsType { + case unknownRecords: + return false, nil + case legacyRecords: + if r.MsgSet == nil { + return false, nil + } + return r.MsgSet.OverflowMessage, nil + case defaultRecords: + return false, nil + } + return false, fmt.Errorf("unknown records type: %v", r.recordsType) +} + func magicValue(pd packetDecoder) (int8, error) { dec, err := pd.peek(magicOffset, magicLength) if err != nil { diff --git a/vendor/github.com/Shopify/sarama/utils.go b/vendor/github.com/Shopify/sarama/utils.go index 702e2262701..1bb00d761a4 100644 --- a/vendor/github.com/Shopify/sarama/utils.go +++ b/vendor/github.com/Shopify/sarama/utils.go @@ -155,6 +155,7 @@ var ( V0_11_0_2 = newKafkaVersion(0, 11, 0, 2) V1_0_0_0 = newKafkaVersion(1, 0, 0, 0) V1_1_0_0 = newKafkaVersion(1, 1, 0, 0) + V2_0_0_0 = newKafkaVersion(2, 0, 0, 0) SupportedVersions = []KafkaVersion{ V0_8_2_0, @@ -173,9 +174,10 @@ var ( V0_11_0_2, V1_0_0_0, V1_1_0_0, + V2_0_0_0, } MinVersion = V0_8_2_0 - MaxVersion = V1_1_0_0 + MaxVersion = V2_0_0_0 ) func ParseKafkaVersion(s string) (KafkaVersion, error) { diff --git a/vendor/github.com/knative/test-infra/.github/pull-request-template.md 
b/vendor/github.com/knative/test-infra/.github/pull-request-template.md new file mode 100644 index 00000000000..9b2b7820f61 --- /dev/null +++ b/vendor/github.com/knative/test-infra/.github/pull-request-template.md @@ -0,0 +1,7 @@ + + +Fixes # diff --git a/vendor/github.com/knative/test-infra/CONTRIBUTING.md b/vendor/github.com/knative/test-infra/CONTRIBUTING.md new file mode 100644 index 00000000000..bcfe857fda4 --- /dev/null +++ b/vendor/github.com/knative/test-infra/CONTRIBUTING.md @@ -0,0 +1,3 @@ +# Contribution guidelines + +So you want to hack on Knative Test Infrastructure? Yay! Please refer to Knative's overall [contribution guidelines](https://github.com/knative/docs/blob/master/community/CONTRIBUTING.md) to find out how you can help. diff --git a/vendor/github.com/knative/test-infra/Gopkg.lock b/vendor/github.com/knative/test-infra/Gopkg.lock new file mode 100644 index 00000000000..e0347ada5a9 --- /dev/null +++ b/vendor/github.com/knative/test-infra/Gopkg.lock @@ -0,0 +1,28 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
+ + +[[projects]] + branch = "master" + name = "github.com/google/licenseclassifier" + packages = [ + ".", + "internal/sets", + "stringclassifier", + "stringclassifier/internal/pq", + "stringclassifier/searchset", + "stringclassifier/searchset/tokenizer" + ] + revision = "3c8ad1f0b0644b6646210ee9cf2f34ff907e2e18" + +[[projects]] + name = "github.com/sergi/go-diff" + packages = ["diffmatchpatch"] + revision = "1744e2970ca51c86172c8190fadad617561ed6e7" + version = "v1.0.0" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "aea50f9014005bedc3dc202c5fbf9d2d8c7a6f7beac2337fd863b23f411c4125" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/knative/test-infra/Gopkg.toml b/vendor/github.com/knative/test-infra/Gopkg.toml new file mode 100644 index 00000000000..1a03ba55a89 --- /dev/null +++ b/vendor/github.com/knative/test-infra/Gopkg.toml @@ -0,0 +1,14 @@ +# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html +# for detailed Gopkg.toml documentation. + +required = [ + "github.com/google/licenseclassifier/licenses", +] + +# TODO(mattmoor): Find a way to bundle the licenseclassifier's +# license database, so folks don't have to go get it. + +[prune] + go-tests = true + unused-packages = true + non-go = true diff --git a/vendor/github.com/knative/test-infra/LICENSE b/vendor/github.com/knative/test-infra/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/knative/test-infra/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/knative/test-infra/OWNERS b/vendor/github.com/knative/test-infra/OWNERS new file mode 100644 index 00000000000..ed29d40151f --- /dev/null +++ b/vendor/github.com/knative/test-infra/OWNERS @@ -0,0 +1,7 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- adrcunha +- jessiezcc +- srinivashegde86 +- steuhs diff --git a/vendor/github.com/knative/test-infra/README.md b/vendor/github.com/knative/test-infra/README.md new file mode 100644 index 00000000000..88f40521e11 --- /dev/null +++ b/vendor/github.com/knative/test-infra/README.md @@ -0,0 +1,17 @@ +# Knative Test Infrastructure + +The `test-infra` repository contains a collection of tools for testing Knative, collecting metrics +and displaying test results. 
+ +## High level architecture + +Knative uses [Prow](https://github.com/kubernetes/test-infra/tree/master/prow) to schedule testing and update issues. + +### Gubernator + +Knative uses [gubernator](https://github.com/kubernetes/test-infra) to provide +a [PR dashboard](https://gubernator.knative.dev/pr) for contributions in the Knative github organization. + +### E2E Testing + +Our E2E testing uses [kubetest](https://github.com/kubernetes/test-infra/blob/master/kubetest) to build/deploy/test Knative clusters. diff --git a/vendor/github.com/knative/test-infra/WORKSPACE b/vendor/github.com/knative/test-infra/WORKSPACE new file mode 100644 index 00000000000..91db673d566 --- /dev/null +++ b/vendor/github.com/knative/test-infra/WORKSPACE @@ -0,0 +1,52 @@ +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Required rules for building kubernetes/test-infra +# These all come from http://github.com/kubernetes/test-infra/blob/master/WORKSPACE + +http_archive( + name = "io_bazel_rules_go", + sha256 = "1868ff68d6079e31b2f09b828b58d62e57ca8e9636edff699247c9108518570b", + url = "https://github.com/bazelbuild/rules_go/releases/download/0.11.1/rules_go-0.11.1.tar.gz", +) + +load("@io_bazel_rules_go//go:def.bzl", "go_rules_dependencies", "go_register_toolchains") + +go_rules_dependencies() + +go_register_toolchains( + go_version = "1.10.2", +) + +git_repository( + name = "io_bazel_rules_k8s", + commit = "3756369d4920033c32c12d16207e8ee14fee1b18", + remote = "https://github.com/bazelbuild/rules_k8s.git", +) + +http_archive( + name = "io_bazel_rules_docker", + sha256 = "cef4e7adfc1df999891e086bf42bed9092cfdf374adb902f18de2c1d6e1e0197", + strip_prefix = "rules_docker-198367210c55fba5dded22274adde1a289801dc4", + urls = ["https://github.com/bazelbuild/rules_docker/archive/198367210c55fba5dded22274adde1a289801dc4.tar.gz"], +) + +# External repositories + +git_repository( + name = "k8s", + remote = "http://github.com/kubernetes/test-infra.git", + commit = "dd12621d6178838097847abf5842ad8d08fc9308", # HEAD as of 8/1/2018 +) + diff --git a/vendor/github.com/knative/test-infra/ci/README.md b/vendor/github.com/knative/test-infra/ci/README.md new file mode 100644 index 00000000000..51b28eddac4 --- /dev/null +++ b/vendor/github.com/knative/test-infra/ci/README.md @@ -0,0 +1,3 @@ +# Continuous Integration / Continuous Deployment system + +This directory contains the configs for all systems related to Knative's CI/CD system. 
diff --git a/vendor/github.com/knative/test-infra/ci/gubernator/Makefile b/vendor/github.com/knative/test-infra/ci/gubernator/Makefile new file mode 100644 index 00000000000..5307d95d94f --- /dev/null +++ b/vendor/github.com/knative/test-infra/ci/gubernator/Makefile @@ -0,0 +1,33 @@ +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +SRC := test-infra/gubernator + +deploy: + # Fetch latest source, patch for our instance + rm -fr test-infra + git clone http://github.com/kubernetes/test-infra.git + cp config.yaml $(SRC) + cp redir_github.py $(SRC) + sed -i -e '/^runtime: .*/a service: gubernator' $(SRC)/app.yaml + sed -i -e "/^handlers:/a\- url: /timeline\n script: redir_github.app\n" $(SRC)/app.yaml + sed -i -e 's/user:kubernetes/user:knative/' $(SRC)/view_pr.py + sed -i -e 's/Kubernetes/Knative/' $(SRC)/templates/index.html + sed -i -e 's/k8s-testgrid.appspot.com/testgrid.knative.dev/' $(SRC)/filters.py + sed -i -e 's/k8s-testgrid/knative-testgrid/' $(SRC)/testgrid.py + # Deploy + make -C ../prow get-cluster-credentials + PROJECT=knative-tests make -C $(SRC) deploy + # Cleanup + rm -fr test-infra diff --git a/vendor/github.com/knative/test-infra/ci/gubernator/README.md b/vendor/github.com/knative/test-infra/ci/gubernator/README.md new file mode 100644 index 00000000000..14508c3dfda --- /dev/null +++ b/vendor/github.com/knative/test-infra/ci/gubernator/README.md @@ -0,0 +1,7 @@ +# Gubernator config + +This directory contains the 
config for our [Gubernator](https://github.com/kubernetes/test-infra/tree/master/gubernator) instance, plus a makefile for deploying it. + +* `config.yaml` Gubernator configuration. +* `Makefile` Recipe for deploying a Gubernator instance. +* `redir_github.py` Simple redirection handler to Gubernator's GitHub service. diff --git a/vendor/github.com/knative/test-infra/ci/gubernator/config.yaml b/vendor/github.com/knative/test-infra/ci/gubernator/config.yaml new file mode 100644 index 00000000000..794b4ce2bdd --- /dev/null +++ b/vendor/github.com/knative/test-infra/ci/gubernator/config.yaml @@ -0,0 +1,71 @@ + +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +default_external_services: + gcs_pull_prefix: knative-prow/pr-logs/pull + prow_url: prow.knative.dev +default_org: knative +default_repo: serving +external_services: + knative: + gcs_bucket: knative-prow/ + gcs_pull_prefix: knative-prow/pr-logs/pull + prow_url: prow.knative.dev +jobs: + knative-prow/pr-logs/directory/: + - pull-knative-serving-build-tests + - pull-knative-serving-integration-tests + - pull-knative-serving-unit-tests + - pull-knative-eventing-build-tests + - pull-knative-eventing-integration-tests + - pull-knative-eventing-unit-tests + - pull-knative-eventing-sources-build-tests + - pull-knative-eventing-sources-integration-tests + - pull-knative-eventing-sources-unit-tests + - pull-knative-docs-build-tests + - pull-knative-docs-unit-tests + - pull-knative-docs-integration-tests + - pull-knative-build-templates-unit-tests + - pull-knative-build-templates-build-tests + - pull-knative-build-templates-integration-tests + - pull-knative-build-pipeline-build-tests + - pull-knative-build-pipeline-unit-tests + - pull-knative-build-build-tests + - pull-knative-build-unit-tests + - pull-knative-build-integration-tests + - pull-knative-pkg-build-tests + - pull-knative-pkg-unit-tests + - pull-knative-pkg-integration-tests + - pull-knative-test-infra-build-tests + - pull-knative-test-infra-unit-tests + - pull-knative-test-infra-integration-tests + - pull-knative-caching-build-tests + - pull-knative-caching-unit-tests + - pull-knative-caching-integration-tests + knative-prow/logs/: + - ci-knative-serving-continuous + - ci-knative-serving-release + - ci-knative-serving-playground + - ci-knative-build-continuous + - ci-knative-build-release + - ci-knative-eventing-continuous + - ci-knative-eventing-release + - ci-knative-eventing-sources-continuous + - ci-knative-eventing-sources-release + - ci-knative-build-templates-continuous + - ci-knative-docs-continuous + - ci-knative-pkg-continuous + - ci-knative-caching-continuous +recursive_artifacts: false diff --git 
a/vendor/github.com/knative/test-infra/ci/gubernator/redir_github.py b/vendor/github.com/knative/test-infra/ci/gubernator/redir_github.py new file mode 100644 index 00000000000..e168d6adbc5 --- /dev/null +++ b/vendor/github.com/knative/test-infra/ci/gubernator/redir_github.py @@ -0,0 +1,25 @@ +#!/usr/bin/env python + +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Simple redirection handler to Gubernator's GitHub service.""" + +import webapp2 + +class GitHubRedirect(webapp2.RequestHandler): + def get(self): + self.redirect("https://github-dot-knative-tests.appspot.com" + self.request.path_qs) + +app = webapp2.WSGIApplication([(r'/.*', GitHubRedirect),], debug=True, config={}) diff --git a/vendor/github.com/knative/test-infra/ci/prow/Makefile b/vendor/github.com/knative/test-infra/ci/prow/Makefile new file mode 100644 index 00000000000..9b6fcfbea17 --- /dev/null +++ b/vendor/github.com/knative/test-infra/ci/prow/Makefile @@ -0,0 +1,42 @@ +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +CLUSTER ?= prow +PROJECT ?= knative-tests +ZONE ?= us-central1-f +JOB_NAMESPACE ?= test-pods + +PROW_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST)))) + +get-cluster-credentials: + gcloud container clusters get-credentials "$(CLUSTER)" --project="$(PROJECT)" --zone="$(ZONE)" + +update-config: get-cluster-credentials + kubectl create configmap config --from-file=config.yaml=config.yaml --dry-run -o yaml | kubectl replace configmap config -f - + +update-plugins: get-cluster-credentials + kubectl create configmap plugins --from-file=plugins.yaml=plugins.yaml --dry-run -o yaml | kubectl replace configmap plugins -f - + +update-boskos: get-cluster-credentials + kubectl apply -f boskos/config.yaml + +update-boskos-config: get-cluster-credentials + kubectl create configmap resources --from-file=config=boskos/resources.yaml --dry-run -o yaml | kubectl --namespace="$(JOB_NAMESPACE)" replace configmap resources -f - + +update-cluster: get-cluster-credentials + kubectl apply -f cluster.yaml + +test: + bazel run @k8s//prow/cmd/config -- --plugin-config=$(PROW_DIR)/plugins.yaml + bazel run @k8s//prow/cmd/config -- --config-path=$(PROW_DIR)/config.yaml diff --git a/vendor/github.com/knative/test-infra/ci/prow/README.md b/vendor/github.com/knative/test-infra/ci/prow/README.md new file mode 100644 index 00000000000..04fd12e6bb2 --- /dev/null +++ b/vendor/github.com/knative/test-infra/ci/prow/README.md @@ -0,0 +1,10 @@ +# Prow config + +This directory contains the config for our [Prow](https://github.com/kubernetes/test-infra/tree/master/prow) instance. + +* `boskos` Configuration for the Boskos instance. +* `Makefile` Commands to interact with the Prow instance regarding updates. +* `cluster.yaml` Configuration of the Prow cluster. +* `config.yaml` Configuration of the Prow jobs. +* `config_start.yaml` Initial, empty configuration for Prow. 
+* `plugins.yaml` Configuration of the Prow plugins. diff --git a/vendor/github.com/knative/test-infra/ci/prow/boskos/README.md b/vendor/github.com/knative/test-infra/ci/prow/boskos/README.md new file mode 100644 index 00000000000..8e6e90ab6b1 --- /dev/null +++ b/vendor/github.com/knative/test-infra/ci/prow/boskos/README.md @@ -0,0 +1,6 @@ +# Boskos config + +This directory contains the config for our [Boskos](https://github.com/kubernetes/test-infra/tree/master/boskos) instance. + +* `config.yaml` Boskos configuration. +* `resources.yaml` Pool of projects used by Boskos. diff --git a/vendor/github.com/knative/test-infra/ci/prow/boskos/config.yaml b/vendor/github.com/knative/test-infra/ci/prow/boskos/config.yaml new file mode 100644 index 00000000000..a444d6cff8b --- /dev/null +++ b/vendor/github.com/knative/test-infra/ci/prow/boskos/config.yaml @@ -0,0 +1,152 @@ +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Boskos deployment for Knative Prow instance +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + labels: + app: boskos + name: boskos-storage + namespace: test-pods +spec: + claimRef: + name: boskos-volume-boskos-0 + namespace: test-pods + capacity: + storage: 1Gi + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + gcePersistentDisk: + pdName: boskos-storage + fsType: ext4 +--- +# Start of StatefulSet +apiVersion: apps/v1beta1 +kind: StatefulSet +metadata: + name: boskos + namespace: test-pods +spec: + serviceName: "boskos" + replicas: 1 # one canonical source of resources + template: + metadata: + labels: + app: boskos + namespace: test-pods + spec: + serviceAccountName: "boskos" + terminationGracePeriodSeconds: 30 + containers: + - name: boskos + image: gcr.io/k8s-testimages/boskos:v20180405-12e892d69 + args: + - --storage=/store/boskos.json + - --config=/etc/config/config + - --namespace=test-pods + ports: + - containerPort: 8080 + protocol: TCP + volumeMounts: + - name: boskos-volume + mountPath: /store + - name: boskos-config + mountPath: /etc/config + readOnly: true + volumes: + - name: boskos-config + configMap: + name: resources + volumeClaimTemplates: + - metadata: + name: boskos-volume + spec: + accessModes: [ "ReadWriteOnce" ] + storageClassName: boskos + resources: + requests: + storage: 1Gi +--- +apiVersion: v1 +kind: Service +metadata: + name: boskos + namespace: test-pods +spec: + selector: + app: boskos + ports: + - name: default + protocol: TCP + port: 80 + targetPort: 8080 +--- +# Janitor +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: boskos-janitor + labels: + app: boskos-janitor + namespace: test-pods +spec: + replicas: 3 # 3 distributed janitor instances + template: + metadata: + labels: + app: boskos-janitor + spec: + serviceAccountName: "boskos" + terminationGracePeriodSeconds: 300 + containers: + - name: boskos-janitor + image: gcr.io/k8s-testimages/janitor:v20180619-83c62c891 + args: + 
- --service-account=/etc/service-account/service-account.json + - --resource-type=gke-project + - --pool-size=10 + volumeMounts: + - mountPath: /etc/service-account + name: service + readOnly: true + volumes: + - name: service + secret: + secretName: service-account +--- +# Reaper +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: boskos-reaper + labels: + app: boskos-reaper + namespace: test-pods +spec: + replicas: 1 # one canonical source of resources + template: + metadata: + labels: + app: boskos-reaper + spec: + serviceAccountName: "boskos" + terminationGracePeriodSeconds: 30 + containers: + - name: boskos-reaper + image: gcr.io/k8s-testimages/reaper:v20180402-43203f868 + args: + - --resource-type=gke-project diff --git a/vendor/github.com/knative/test-infra/ci/prow/boskos/config_start.yaml b/vendor/github.com/knative/test-infra/ci/prow/boskos/config_start.yaml new file mode 100644 index 00000000000..dc8d66b6295 --- /dev/null +++ b/vendor/github.com/knative/test-infra/ci/prow/boskos/config_start.yaml @@ -0,0 +1,23 @@ +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Initial configuration of Boskos + +apiVersion: v1 +kind: ConfigMap +metadata: + name: resources + namespace: test-pods +data: + resources: "" diff --git a/vendor/github.com/knative/test-infra/ci/prow/boskos/resources.yaml b/vendor/github.com/knative/test-infra/ci/prow/boskos/resources.yaml new file mode 100644 index 00000000000..58f734358f6 --- /dev/null +++ b/vendor/github.com/knative/test-infra/ci/prow/boskos/resources.yaml @@ -0,0 +1,38 @@ +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +resources: +- names: + - knative-boskos-01 + - knative-boskos-02 + - knative-boskos-03 + - knative-boskos-04 + - knative-boskos-05 + - knative-boskos-06 + - knative-boskos-07 + - knative-boskos-08 + - knative-boskos-09 + - knative-boskos-10 + - knative-boskos-11 + - knative-boskos-12 + - knative-boskos-13 + - knative-boskos-14 + - knative-boskos-15 + - knative-boskos-16 + - knative-boskos-17 + - knative-boskos-18 + - knative-boskos-19 + - knative-boskos-20 + state: dirty + type: gke-project diff --git a/vendor/github.com/knative/test-infra/ci/prow/cluster.yaml b/vendor/github.com/knative/test-infra/ci/prow/cluster.yaml new file mode 100644 index 00000000000..5031611756f --- /dev/null +++ b/vendor/github.com/knative/test-infra/ci/prow/cluster.yaml @@ -0,0 +1,350 @@ +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This file contains Kubernetes YAML files for the most important prow components. +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: prowjobs.prow.k8s.io +spec: + group: prow.k8s.io + version: v1 + names: + kind: ProwJob + singular: prowjob + plural: prowjobs + scope: Namespaced + validation: + openAPIV3Schema: + properties: + spec: + properties: + max_concurrency: + type: integer + minimum: 0 + type: + type: string + enum: + - "presubmit" + - "postsubmit" + - "periodic" + - "batch" + status: + properties: + state: + type: string + enum: + - "triggered" + - "pending" + - "success" + - "failure" + - "aborted" + - "error" + anyOf: + - not: + properties: + state: + type: string + enum: + - "success" + - "failure" + - "error" + - "aborted" + - required: + - completionTime +--- +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: hook + labels: + app: hook +spec: + replicas: 2 + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + template: + metadata: + labels: + app: hook + spec: + serviceAccountName: "hook" + terminationGracePeriodSeconds: 180 + containers: + - name: hook + image: gcr.io/k8s-prow/hook:v20181023-ca14137 + imagePullPolicy: Always + args: + - --dry-run=false + ports: + - name: http + containerPort: 8888 + volumeMounts: + - name: hmac + mountPath: /etc/webhook + readOnly: true + - name: oauth + mountPath: /etc/github + readOnly: true + - name: config + mountPath: /etc/config + readOnly: true + - name: plugins + mountPath: /etc/plugins + readOnly: 
true + volumes: + - name: hmac + secret: + secretName: hmac-token + - name: oauth + secret: + secretName: oauth-token + - name: config + configMap: + name: config + - name: plugins + configMap: + name: plugins +--- +apiVersion: v1 +kind: Service +metadata: + name: hook +spec: + selector: + app: hook + ports: + - port: 8888 + type: NodePort +--- +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: plank + labels: + app: plank +spec: + replicas: 1 # Do not scale up. + template: + metadata: + labels: + app: plank + spec: + serviceAccountName: "plank" + containers: + - name: plank + image: gcr.io/k8s-prow/plank:v20180709-7109caeb1 + args: + - --dry-run=false + volumeMounts: + - name: oauth + mountPath: /etc/github + readOnly: true + - name: config + mountPath: /etc/config + readOnly: true + volumes: + - name: oauth + secret: + secretName: oauth-token + - name: config + configMap: + name: config +--- +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: sinker + labels: + app: sinker +spec: + replicas: 1 + template: + metadata: + labels: + app: sinker + spec: + serviceAccountName: "sinker" + containers: + - name: sinker + image: gcr.io/k8s-prow/sinker:v20180709-7109caeb1 + volumeMounts: + - name: config + mountPath: /etc/config + readOnly: true + volumes: + - name: config + configMap: + name: config +--- +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: deck + labels: + app: deck +spec: + replicas: 2 + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + template: + metadata: + labels: + app: deck + spec: + serviceAccountName: "deck" + terminationGracePeriodSeconds: 30 + containers: + - name: deck + image: gcr.io/k8s-prow/deck:v20180709-7109caeb1 + args: + - --hook-url=http://hook:8888/plugin-help + - --tide-url=http://tide/ + ports: + - name: http + containerPort: 8080 + volumeMounts: + - name: config + mountPath: /etc/config + readOnly: true + volumes: + - name: config + configMap: + 
name: config +--- +apiVersion: v1 +kind: Service +metadata: + name: deck +spec: + selector: + app: deck + ports: + - port: 80 + targetPort: 8080 + type: NodePort +--- +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: horologium + labels: + app: horologium +spec: + replicas: 1 + template: + metadata: + labels: + app: horologium + spec: + serviceAccountName: "horologium" + terminationGracePeriodSeconds: 30 + containers: + - name: horologium + image: gcr.io/k8s-prow/horologium:v20180709-7109caeb1 + volumeMounts: + - name: config + mountPath: /etc/config + readOnly: true + volumes: + - name: config + configMap: + name: config + +# Ingresses + +--- +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: deck-ing + annotations: + kubernetes.io/ingress.class: "gce" + kubernetes.io/ingress.global-static-ip-name: prow-ingress +spec: + tls: + - secretName: tls-secret + hosts: + - prow.knative.dev + rules: + - host: prow.knative.dev + http: + paths: + - path: /* + backend: + serviceName: deck + servicePort: 80 + - path: /hook + backend: + serviceName: hook + servicePort: 8888 + +# Tide + +apiVersion: v1 +kind: Service +metadata: + name: tide +spec: + selector: + app: tide + ports: + - port: 80 + targetPort: 8888 + type: NodePort +--- +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: tide + labels: + app: tide +spec: + replicas: 1 # Do not scale up. 
+ template: + metadata: + labels: + app: tide + spec: + serviceAccountName: "tide" + containers: + - name: tide + image: gcr.io/k8s-prow/tide:v20180808-68cee5a41 + args: + - --dry-run=false + ports: + - name: http + containerPort: 8888 + volumeMounts: + - name: oauth + mountPath: /etc/github + readOnly: true + - name: config + mountPath: /etc/config + readOnly: true + volumes: + - name: oauth + secret: + secretName: oauth-token + - name: config + configMap: + name: config + diff --git a/vendor/github.com/knative/test-infra/ci/prow/config.yaml b/vendor/github.com/knative/test-infra/ci/prow/config.yaml new file mode 100644 index 00000000000..86b707338af --- /dev/null +++ b/vendor/github.com/knative/test-infra/ci/prow/config.yaml @@ -0,0 +1,2211 @@ +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +plank: + job_url_template: 'https://gubernator.knative.dev/build/knative-prow/{{if or (eq .Spec.Type "presubmit") (eq .Spec.Type "batch")}}pr-logs/pull{{with .Spec.Refs}}/{{.Org}}_{{.Repo}}{{end}}{{else}}logs{{end}}{{if eq .Spec.Type "presubmit"}}/{{with index .Spec.Refs.Pulls 0}}{{.Number}}{{end}}{{else if eq .Spec.Type "batch"}}/batch{{end}}/{{.Spec.Job}}/{{.Status.BuildID}}/' + report_template: '[Full PR test history](https://gubernator.knative.dev/pr/{{.Spec.Refs.Org}}_{{.Spec.Refs.Repo}}/{{with index .Spec.Refs.Pulls 0}}{{.Number}}{{end}}). 
[Your PR dashboard](https://gubernator.knative.dev/pr/{{with index .Spec.Refs.Pulls 0}}{{.Author}}{{end}}).' + pod_pending_timeout: 60m + default_decoration_config: + timeout: 7200000000000 # 2h + grace_period: 15000000000 # 15s + utility_images: + clonerefs: "gcr.io/k8s-prow/clonerefs@sha256:b62ba1f379ac19c5ec9ee7bcab14d3f0b3c31cea9cdd4bc491e98e2c5f346c07" + initupload: "gcr.io/k8s-prow/initupload@sha256:58f89f2aae68f7dc46aaf05c7e8204c4f26b53ec9ce30353d1c27ce44a60d121" + entrypoint: "gcr.io/k8s-prow/entrypoint:v20180512-0255926d1" + sidecar: "gcr.io/k8s-prow/sidecar@sha256:8807b2565f4d2699920542fcf890878824b1ede4198d7ff46bca53feb064ed44" + gcs_configuration: + bucket: "knative-prow" + path_strategy: "explicit" + gcs_credentials_secret: "service-account" + +prowjob_namespace: default +pod_namespace: test-pods +log_level: info + +branch-protection: + orgs: + knative: + # Protect all branches in knative + # This means all prow jobs with "always_run" set are required + # to pass before tide can merge the PR. + # Currently this is manually enabled by the knative org admins, + # but it's stated here for documentation and reference purposes. 
+ protect: true + # Admins can overrule checks + enforce_admins: false + +tide: + queries: + - repos: + - knative/build + - knative/build-pipeline + - knative/build-templates + - knative/serving + - knative/eventing + - knative/eventing-sources + - knative/docs + - knative/test-infra + - knative/pkg + - knative/caching + labels: + - lgtm + - approved + missingLabels: + - do-not-merge/hold + - do-not-merge/work-in-progress + merge_method: + knative: squash + knative/build-pipeline: rebase + target_url: https://prow.knative.dev/tide.html + +presets: +- labels: + preset-service-account: "true" + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/service-account/service-account.json + volumes: + - name: service + secret: + secretName: service-account + volumeMounts: + - name: service + mountPath: /etc/service-account + readOnly: true +# storage / caching presets +- labels: + preset-bazel-scratch-dir: "true" + env: + - name: TEST_TMPDIR + value: /bazel-scratch/.cache/bazel + volumes: + - name: bazel-scratch + emptyDir: {} + volumeMounts: + - name: bazel-scratch + mountPath: /bazel-scratch/.cache +- labels: + preset-bazel-remote-cache-enabled: "false" + env: + - name: BAZEL_REMOTE_CACHE_ENABLED + value: "false" +# docker-in-docker presets +- labels: + preset-dind-enabled: "true" + env: + - name: DOCKER_IN_DOCKER_ENABLED + value: "true" + volumes: + - name: docker-graph + emptyDir: {} + volumeMounts: + - name: docker-graph + mountPath: /docker-graph + +presubmits: + knative/serving: + - name: pull-knative-serving-build-tests + agent: kubernetes + context: pull-knative-serving-build-tests + always_run: true + rerun_command: "/test pull-knative-serving-build-tests" + trigger: "(?m)^/test (all|pull-knative-serving-build-tests),?(\\s+|$)" + labels: + preset-service-account: "true" + preset-bazel-scratch-dir: "true" + preset-bazel-remote-cache-enabled: "true" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/prow-tests:latest + imagePullPolicy: Always + 
args: + - "--scenario=kubernetes_execute_bazel" + - "--clean" + - "--job=$(JOB_NAME)" + - "--repo=github.com/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)" + - "--root=/go/src" + - "--service-account=/etc/service-account/service-account.json" + - "--upload=gs://knative-prow/pr-logs" + - "--" # end bootstrap args, scenario args below + - "--" # end kubernetes_execute_bazel flags (consider following flags as text) + - "./test/presubmit-tests.sh" + - "--build-tests" + # Bazel needs privileged mode in order to sandbox builds. + securityContext: + privileged: true + resources: + requests: + memory: "1Gi" + + - name: pull-knative-serving-unit-tests + agent: kubernetes + context: pull-knative-serving-unit-tests + always_run: true + rerun_command: "/test pull-knative-serving-unit-tests" + trigger: "(?m)^/test (all|pull-knative-serving-unit-tests),?(\\s+|$)" + labels: + preset-service-account: "true" + preset-bazel-scratch-dir: "true" + preset-bazel-remote-cache-enabled: "true" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/prow-tests:latest + imagePullPolicy: Always + args: + - "--scenario=kubernetes_execute_bazel" + - "--clean" + - "--job=$(JOB_NAME)" + - "--repo=github.com/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)" + - "--root=/go/src" + - "--service-account=/etc/service-account/service-account.json" + - "--upload=gs://knative-prow/pr-logs" + - "--" # end bootstrap args, scenario args below + - "--" # end kubernetes_execute_bazel flags (consider following flags as text) + - "./test/presubmit-tests.sh" + - "--unit-tests" + # Bazel needs privileged mode in order to sandbox builds. 
+ securityContext: + privileged: true + resources: + requests: + memory: "1Gi" + + - name: pull-knative-serving-integration-tests + agent: kubernetes + context: pull-knative-serving-integration-tests + always_run: true + rerun_command: "/test pull-knative-serving-integration-tests" + trigger: "(?m)^/test (all|pull-knative-serving-integration-tests),?(\\s+|$)" + labels: + preset-service-account: "true" + preset-bazel-scratch-dir: "true" + preset-bazel-remote-cache-enabled: "true" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/prow-tests:latest + imagePullPolicy: Always + args: + - "--scenario=kubernetes_execute_bazel" + - "--clean" + - "--job=$(JOB_NAME)" + - "--repo=github.com/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)" + - "--root=/go/src" + - "--service-account=/etc/service-account/service-account.json" + - "--upload=gs://knative-prow/pr-logs" + - "--" # end bootstrap args, scenario args below + - "--" # end kubernetes_execute_bazel flags (consider following flags as text) + - "./test/presubmit-tests.sh" + - "--integration-tests" + # Bazel needs privileged mode in order to sandbox builds. + securityContext: + privileged: true + resources: + requests: + memory: "1Gi" + + - name: pull-knative-serving-go-coverage + labels: + preset-service-account: "true" + agent: kubernetes + context: pull-knative-serving-go-coverage + always_run: true + rerun_command: "/test pull-knative-serving-go-coverage" + trigger: "(?m)^/test (all|pull-knative-serving-go-coverage),?(\\s+|$)" + optional: true + decorate: true + clone_uri: "https://github.com/knative/serving.git" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/coverage:latest + imagePullPolicy: Always + command: + - "/coverage" + args: + - "--postsubmit-gcs-bucket=knative-prow" + - "--postsubmit-job-name=post-knative-serving-go-coverage" + - "--artifacts=$(ARTIFACTS)" + - "--profile-name=coverage_profile.txt" + - "--cov-target=." 
+ - "--cov-threshold-percentage=50" + - "--github-token=/etc/github-token/token" + volumeMounts: + - name: github-token + mountPath: /etc/github-token + readOnly: true + volumes: + - name: github-token + secret: + secretName: covbot-token + + - name: pull-knative-serving-go-coverage-dev + labels: + preset-service-account: "true" + agent: kubernetes + context: pull-knative-serving-go-coverage-dev + always_run: false + rerun_command: "/test pull-knative-serving-go-coverage-dev" + trigger: "(?m)^/test (pull-knative-serving-go-coverage-dev),?(\\s+|$)" + optional: true + decorate: true + clone_uri: "https://github.com/knative/serving.git" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/coverage-dev:latest-dev + imagePullPolicy: Always + command: + - "/coverage" + args: + - "--postsubmit-gcs-bucket=knative-prow" + - "--postsubmit-job-name=post-knative-serving-go-coverage" + - "--profile-name=coverage_profile.txt" + - "--artifacts=$(ARTIFACTS)" + - "--cov-target=." + - "--cov-threshold-percentage=81" + - "--github-token=/etc/github-token/token" + volumeMounts: + - name: github-token + mountPath: /etc/github-token + readOnly: true + volumes: + - name: github-token + secret: + secretName: covbot-token + + knative/build: + - name: pull-knative-build-build-tests + agent: kubernetes + context: pull-knative-build-build-tests + always_run: true + rerun_command: "/test pull-knative-build-build-tests" + trigger: "(?m)^/test (all|pull-knative-build-build-tests),?(\\s+|$)" + labels: + preset-service-account: "true" + preset-bazel-scratch-dir: "true" + preset-bazel-remote-cache-enabled: "true" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/prow-tests:latest + imagePullPolicy: Always + args: + - "--scenario=kubernetes_execute_bazel" + - "--clean" + - "--job=$(JOB_NAME)" + - "--repo=github.com/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)" + - "--root=/go/src" + - "--service-account=/etc/service-account/service-account.json" + - 
"--upload=gs://knative-prow/pr-logs" + - "--" # end bootstrap args, scenario args below + - "--" # end kubernetes_execute_bazel flags (consider following flags as text) + - "./test/presubmit-tests.sh" + - "--build-tests" + # Bazel needs privileged mode in order to sandbox builds. + securityContext: + privileged: true + resources: + requests: + memory: "1Gi" + + - name: pull-knative-build-unit-tests + agent: kubernetes + context: pull-knative-build-unit-tests + always_run: true + rerun_command: "/test pull-knative-build-unit-tests" + trigger: "(?m)^/test (all|pull-knative-build-unit-tests),?(\\s+|$)" + labels: + preset-service-account: "true" + preset-bazel-scratch-dir: "true" + preset-bazel-remote-cache-enabled: "true" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/prow-tests:latest + imagePullPolicy: Always + args: + - "--scenario=kubernetes_execute_bazel" + - "--clean" + - "--job=$(JOB_NAME)" + - "--repo=github.com/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)" + - "--root=/go/src" + - "--service-account=/etc/service-account/service-account.json" + - "--upload=gs://knative-prow/pr-logs" + - "--" # end bootstrap args, scenario args below + - "--" # end kubernetes_execute_bazel flags (consider following flags as text) + - "./test/presubmit-tests.sh" + - "--unit-tests" + # Bazel needs privileged mode in order to sandbox builds. 
+ securityContext: + privileged: true + resources: + requests: + memory: "1Gi" + + - name: pull-knative-build-integration-tests + agent: kubernetes + context: pull-knative-build-integration-tests + always_run: true + rerun_command: "/test pull-knative-build-integration-tests" + trigger: "(?m)^/test (all|pull-knative-build-integration-tests),?(\\s+|$)" + labels: + preset-service-account: "true" + preset-bazel-scratch-dir: "true" + preset-bazel-remote-cache-enabled: "true" + preset-dind-enabled: "true" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/prow-tests:latest + imagePullPolicy: Always + args: + - "--scenario=kubernetes_execute_bazel" + - "--clean" + - "--job=$(JOB_NAME)" + - "--repo=github.com/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)" + - "--root=/go/src" + - "--service-account=/etc/service-account/service-account.json" + - "--upload=gs://knative-prow/pr-logs" + - "--" # end bootstrap args, scenario args below + - "--" # end kubernetes_execute_bazel flags (consider following flags as text) + - "./test/presubmit-tests.sh" + - "--integration-tests" + # Bazel needs privileged mode in order to sandbox builds. + securityContext: + privileged: true + resources: + requests: + memory: "1Gi" + + - name: pull-knative-build-go-coverage + labels: + preset-service-account: "true" + agent: kubernetes + context: pull-knative-build-go-coverage + always_run: true + rerun_command: "/test pull-knative-build-go-coverage" + trigger: "(?m)^/test (all|pull-knative-build-go-coverage),?(\\s+|$)" + optional: true + decorate: true + clone_uri: "https://github.com/knative/build.git" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/coverage:latest + imagePullPolicy: Always + command: + - "/coverage" + args: + - "--postsubmit-gcs-bucket=knative-prow" + - "--postsubmit-job-name=post-knative-build-go-coverage" + - "--artifacts=$(ARTIFACTS)" + - "--profile-name=coverage_profile.txt" + - "--cov-target=." 
+ - "--cov-threshold-percentage=50" + - "--github-token=/etc/github-token/token" + volumeMounts: + - name: github-token + mountPath: /etc/github-token + readOnly: true + volumes: + - name: github-token + secret: + secretName: covbot-token + + knative/build-pipeline: + - name: pull-knative-build-pipeline-build-tests + agent: kubernetes + context: pull-knative-build-pipeline-build-tests + always_run: true + rerun_command: "/test pull-knative-build-pipeline-build-tests" + trigger: "(?m)^/test (all|pull-knative-build-pipeline-build-tests),?(\\s+|$)" + labels: + preset-service-account: "true" + preset-bazel-scratch-dir: "true" + preset-bazel-remote-cache-enabled: "true" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/prow-tests:latest + imagePullPolicy: Always + args: + - "--scenario=kubernetes_execute_bazel" + - "--clean" + - "--job=$(JOB_NAME)" + - "--repo=github.com/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)" + - "--root=/go/src" + - "--service-account=/etc/service-account/service-account.json" + - "--upload=gs://knative-prow/pr-logs" + - "--" # end bootstrap args, scenario args below + - "--" # end kubernetes_execute_bazel flags (consider following flags as text) + - "./test/presubmit-tests.sh" + - "--build-tests" + # Bazel needs privileged mode in order to sandbox builds. 
+ securityContext: + privileged: true + resources: + requests: + memory: "1Gi" + + - name: pull-knative-build-pipeline-unit-tests + agent: kubernetes + context: pull-knative-build-pipeline-unit-tests + always_run: true + rerun_command: "/test pull-knative-build-pipeline-unit-tests" + trigger: "(?m)^/test (all|pull-knative-build-pipeline-unit-tests),?(\\s+|$)" + labels: + preset-service-account: "true" + preset-bazel-scratch-dir: "true" + preset-bazel-remote-cache-enabled: "true" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/prow-tests:latest + imagePullPolicy: Always + args: + - "--scenario=kubernetes_execute_bazel" + - "--clean" + - "--job=$(JOB_NAME)" + - "--repo=github.com/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)" + - "--root=/go/src" + - "--service-account=/etc/service-account/service-account.json" + - "--upload=gs://knative-prow/pr-logs" + - "--" # end bootstrap args, scenario args below + - "--" # end kubernetes_execute_bazel flags (consider following flags as text) + - "./test/presubmit-tests.sh" + - "--unit-tests" + # Bazel needs privileged mode in order to sandbox builds. 
+ securityContext: + privileged: true + resources: + requests: + memory: "1Gi" + + - name: pull-knative-build-pipeline-integration-tests + agent: kubernetes + context: pull-knative-build-pipeline-integration-tests + always_run: true + rerun_command: "/test pull-knative-build-pipeline-integration-tests" + trigger: "(?m)^/test (all|pull-knative-build-pipeline-integration-tests),?(\\s+|$)" + labels: + preset-service-account: "true" + preset-bazel-scratch-dir: "true" + preset-bazel-remote-cache-enabled: "true" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/prow-tests:latest + imagePullPolicy: Always + args: + - "--scenario=kubernetes_execute_bazel" + - "--clean" + - "--job=$(JOB_NAME)" + - "--repo=github.com/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)" + - "--root=/go/src" + - "--service-account=/etc/service-account/service-account.json" + - "--upload=gs://knative-prow/pr-logs" + - "--" # end bootstrap args, scenario args below + - "--" # end kubernetes_execute_bazel flags (consider following flags as text) + - "./test/presubmit-tests.sh" + - "--integration-tests" + # Bazel needs privileged mode in order to sandbox builds. 
+ securityContext: + privileged: true + resources: + requests: + memory: "1Gi" + + - name: pull-knative-build-pipeline-go-coverage + labels: + preset-service-account: "true" + agent: kubernetes + context: pull-knative-build-pipeline-go-coverage + always_run: true + rerun_command: "/test pull-knative-build-pipeline-go-coverage" + trigger: "(?m)^/test (all|pull-knative-build-pipeline-go-coverage),?(\\s+|$)" + optional: true + decorate: true + clone_uri: "https://github.com/knative/build-pipeline.git" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/coverage:latest + imagePullPolicy: Always + command: + - "/coverage" + args: + - "--postsubmit-gcs-bucket=knative-prow" + - "--postsubmit-job-name=post-knative-build-pipeline-go-coverage" + - "--artifacts=$(ARTIFACTS)" + - "--profile-name=coverage_profile.txt" + - "--cov-target=." + - "--cov-threshold-percentage=50" + - "--github-token=/etc/github-token/token" + volumeMounts: + - name: github-token + mountPath: /etc/github-token + readOnly: true + volumes: + - name: github-token + secret: + secretName: covbot-token + + knative/eventing: + - name: pull-knative-eventing-build-tests + agent: kubernetes + context: pull-knative-eventing-build-tests + always_run: true + rerun_command: "/test pull-knative-eventing-build-tests" + trigger: "(?m)^/test (all|pull-knative-eventing-build-tests),?(\\s+|$)" + labels: + preset-service-account: "true" + preset-bazel-scratch-dir: "true" + preset-bazel-remote-cache-enabled: "true" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/prow-tests:latest + imagePullPolicy: Always + args: + - "--scenario=kubernetes_execute_bazel" + - "--clean" + - "--job=$(JOB_NAME)" + - "--repo=github.com/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)" + - "--root=/go/src" + - "--service-account=/etc/service-account/service-account.json" + - "--upload=gs://knative-prow/pr-logs" + - "--" # end bootstrap args, scenario args below + - "--" # end kubernetes_execute_bazel flags (consider 
following flags as text) + - "./test/presubmit-tests.sh" + - "--build-tests" + # Bazel needs privileged mode in order to sandbox builds. + securityContext: + privileged: true + resources: + requests: + memory: "1Gi" + + - name: pull-knative-eventing-unit-tests + agent: kubernetes + context: pull-knative-eventing-unit-tests + always_run: true + rerun_command: "/test pull-knative-eventing-unit-tests" + trigger: "(?m)^/test (all|pull-knative-eventing-unit-tests),?(\\s+|$)" + labels: + preset-service-account: "true" + preset-bazel-scratch-dir: "true" + preset-bazel-remote-cache-enabled: "true" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/prow-tests:latest + imagePullPolicy: Always + args: + - "--scenario=kubernetes_execute_bazel" + - "--clean" + - "--job=$(JOB_NAME)" + - "--repo=github.com/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)" + - "--root=/go/src" + - "--service-account=/etc/service-account/service-account.json" + - "--upload=gs://knative-prow/pr-logs" + - "--" # end bootstrap args, scenario args below + - "--" # end kubernetes_execute_bazel flags (consider following flags as text) + - "./test/presubmit-tests.sh" + - "--unit-tests" + # Bazel needs privileged mode in order to sandbox builds. 
+ securityContext: + privileged: true + resources: + requests: + memory: "1Gi" + + - name: pull-knative-eventing-integration-tests + agent: kubernetes + context: pull-knative-eventing-integration-tests + always_run: true + rerun_command: "/test pull-knative-eventing-integration-tests" + trigger: "(?m)^/test (all|pull-knative-eventing-integration-tests),?(\\s+|$)" + labels: + preset-service-account: "true" + preset-bazel-scratch-dir: "true" + preset-bazel-remote-cache-enabled: "true" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/prow-tests:latest + imagePullPolicy: Always + args: + - "--scenario=kubernetes_execute_bazel" + - "--clean" + - "--job=$(JOB_NAME)" + - "--repo=github.com/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)" + - "--root=/go/src" + - "--service-account=/etc/service-account/service-account.json" + - "--upload=gs://knative-prow/pr-logs" + - "--" # end bootstrap args, scenario args below + - "--" # end kubernetes_execute_bazel flags (consider following flags as text) + - "./test/presubmit-tests.sh" + - "--integration-tests" + # Bazel needs privileged mode in order to sandbox builds. + securityContext: + privileged: true + resources: + requests: + memory: "1Gi" + + - name: pull-knative-eventing-go-coverage + labels: + preset-service-account: "true" + agent: kubernetes + context: pull-knative-eventing-go-coverage + always_run: true + rerun_command: "/test pull-knative-eventing-go-coverage" + trigger: "(?m)^/test (all|pull-knative-eventing-go-coverage),?(\\s+|$)" + optional: true + decorate: true + clone_uri: "https://github.com/knative/eventing.git" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/coverage:latest + imagePullPolicy: Always + command: + - "/coverage" + args: + - "--postsubmit-gcs-bucket=knative-prow" + - "--postsubmit-job-name=post-knative-eventing-go-coverage" + - "--artifacts=$(ARTIFACTS)" + - "--profile-name=coverage_profile.txt" + - "--cov-target=." 
+ - "--cov-threshold-percentage=50" + - "--github-token=/etc/github-token/token" + volumeMounts: + - name: github-token + mountPath: /etc/github-token + readOnly: true + volumes: + - name: github-token + secret: + secretName: covbot-token + + knative/eventing-sources: + - name: pull-knative-eventing-sources-build-tests + agent: kubernetes + context: pull-knative-eventing-sources-build-tests + always_run: true + rerun_command: "/test pull-knative-eventing-sources-build-tests" + trigger: "(?m)^/test (all|pull-knative-eventing-sources-build-tests),?(\\s+|$)" + labels: + preset-service-account: "true" + preset-bazel-scratch-dir: "true" + preset-bazel-remote-cache-enabled: "true" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/prow-tests:latest + imagePullPolicy: Always + args: + - "--scenario=kubernetes_execute_bazel" + - "--clean" + - "--job=$(JOB_NAME)" + - "--repo=github.com/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)" + - "--root=/go/src" + - "--service-account=/etc/service-account/service-account.json" + - "--upload=gs://knative-prow/pr-logs" + - "--" # end bootstrap args, scenario args below + - "--" # end kubernetes_execute_bazel flags (consider following flags as text) + - "./test/presubmit-tests.sh" + - "--build-tests" + # Bazel needs privileged mode in order to sandbox builds. 
+ securityContext: + privileged: true + resources: + requests: + memory: "1Gi" + + - name: pull-knative-eventing-sources-unit-tests + agent: kubernetes + context: pull-knative-eventing-sources-unit-tests + always_run: true + rerun_command: "/test pull-knative-eventing-sources-unit-tests" + trigger: "(?m)^/test (all|pull-knative-eventing-sources-unit-tests),?(\\s+|$)" + labels: + preset-service-account: "true" + preset-bazel-scratch-dir: "true" + preset-bazel-remote-cache-enabled: "true" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/prow-tests:latest + imagePullPolicy: Always + args: + - "--scenario=kubernetes_execute_bazel" + - "--clean" + - "--job=$(JOB_NAME)" + - "--repo=github.com/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)" + - "--root=/go/src" + - "--service-account=/etc/service-account/service-account.json" + - "--upload=gs://knative-prow/pr-logs" + - "--" # end bootstrap args, scenario args below + - "--" # end kubernetes_execute_bazel flags (consider following flags as text) + - "./test/presubmit-tests.sh" + - "--unit-tests" + # Bazel needs privileged mode in order to sandbox builds. 
+ securityContext: + privileged: true + resources: + requests: + memory: "1Gi" + + - name: pull-knative-eventing-sources-integration-tests + agent: kubernetes + context: pull-knative-eventing-sources-integration-tests + always_run: true + rerun_command: "/test pull-knative-eventing-sources-integration-tests" + trigger: "(?m)^/test (all|pull-knative-eventing-sources-integration-tests),?(\\s+|$)" + labels: + preset-service-account: "true" + preset-bazel-scratch-dir: "true" + preset-bazel-remote-cache-enabled: "true" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/prow-tests:latest + imagePullPolicy: Always + args: + - "--scenario=kubernetes_execute_bazel" + - "--clean" + - "--job=$(JOB_NAME)" + - "--repo=github.com/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)" + - "--root=/go/src" + - "--service-account=/etc/service-account/service-account.json" + - "--upload=gs://knative-prow/pr-logs" + - "--" # end bootstrap args, scenario args below + - "--" # end kubernetes_execute_bazel flags (consider following flags as text) + - "./test/presubmit-tests.sh" + - "--integration-tests" + # Bazel needs privileged mode in order to sandbox builds. 
+ securityContext: + privileged: true + resources: + requests: + memory: "1Gi" + + - name: pull-knative-eventing-sources-go-coverage + labels: + preset-service-account: "true" + agent: kubernetes + context: pull-knative-eventing-sources-go-coverage + always_run: true + rerun_command: "/test pull-knative-eventing-sources-go-coverage" + trigger: "(?m)^/test (all|pull-knative-eventing-sources-go-coverage),?(\\s+|$)" + optional: true + decorate: true + clone_uri: "https://github.com/knative/eventing-sources.git" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/coverage:latest + imagePullPolicy: Always + command: + - "/coverage" + args: + - "--postsubmit-gcs-bucket=knative-prow" + - "--postsubmit-job-name=post-knative-eventing-sources-go-coverage" + - "--artifacts=$(ARTIFACTS)" + - "--profile-name=coverage_profile.txt" + - "--cov-target=." + - "--cov-threshold-percentage=50" + - "--github-token=/etc/github-token/token" + volumeMounts: + - name: github-token + mountPath: /etc/github-token + readOnly: true + volumes: + - name: github-token + secret: + secretName: covbot-token + + knative/docs: + - name: pull-knative-docs-build-tests + agent: kubernetes + context: pull-knative-docs-build-tests + always_run: true + rerun_command: "/test pull-knative-docs-build-tests" + trigger: "(?m)^/test (all|pull-knative-docs-build-tests),?(\\s+|$)" + labels: + preset-service-account: "true" + preset-bazel-scratch-dir: "true" + preset-bazel-remote-cache-enabled: "true" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/prow-tests:latest + imagePullPolicy: Always + args: + - "--scenario=kubernetes_execute_bazel" + - "--clean" + - "--job=$(JOB_NAME)" + - "--repo=github.com/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)" + - "--root=/go/src" + - "--service-account=/etc/service-account/service-account.json" + - "--upload=gs://knative-prow/pr-logs" + - "--" # end bootstrap args, scenario args below + - "--" # end kubernetes_execute_bazel flags (consider following flags 
as text) + - "./test/presubmit-tests.sh" + - "--build-tests" + # Bazel needs privileged mode in order to sandbox builds. + securityContext: + privileged: true + resources: + requests: + memory: "1Gi" + + - name: pull-knative-docs-unit-tests + agent: kubernetes + context: pull-knative-docs-unit-tests + always_run: true + rerun_command: "/test pull-knative-docs-unit-tests" + trigger: "(?m)^/test (all|pull-knative-docs-unit-tests),?(\\s+|$)" + labels: + preset-service-account: "true" + preset-bazel-scratch-dir: "true" + preset-bazel-remote-cache-enabled: "true" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/prow-tests:latest + imagePullPolicy: Always + args: + - "--scenario=kubernetes_execute_bazel" + - "--clean" + - "--job=$(JOB_NAME)" + - "--repo=github.com/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)" + - "--root=/go/src" + - "--service-account=/etc/service-account/service-account.json" + - "--upload=gs://knative-prow/pr-logs" + - "--" # end bootstrap args, scenario args below + - "--" # end kubernetes_execute_bazel flags (consider following flags as text) + - "./test/presubmit-tests.sh" + - "--unit-tests" + # Bazel needs privileged mode in order to sandbox builds. 
+ securityContext: + privileged: true + resources: + requests: + memory: "1Gi" + + - name: pull-knative-docs-integration-tests + agent: kubernetes + context: pull-knative-docs-integration-tests + always_run: true + rerun_command: "/test pull-knative-docs-integration-tests" + trigger: "(?m)^/test (all|pull-knative-docs-integration-tests),?(\\s+|$)" + labels: + preset-service-account: "true" + preset-bazel-scratch-dir: "true" + preset-bazel-remote-cache-enabled: "true" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/prow-tests:latest + imagePullPolicy: Always + args: + - "--scenario=kubernetes_execute_bazel" + - "--clean" + - "--job=$(JOB_NAME)" + - "--repo=github.com/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)" + - "--root=/go/src" + - "--service-account=/etc/service-account/service-account.json" + - "--upload=gs://knative-prow/pr-logs" + - "--" # end bootstrap args, scenario args below + - "--" # end kubernetes_execute_bazel flags (consider following flags as text) + - "./test/presubmit-tests.sh" + - "--integration-tests" + # Bazel needs privileged mode in order to sandbox builds. + securityContext: + privileged: true + resources: + requests: + memory: "1Gi" + + - name: pull-knative-docs-go-coverage + labels: + preset-service-account: "true" + agent: kubernetes + context: pull-knative-docs-go-coverage + always_run: true + rerun_command: "/test pull-knative-docs-go-coverage" + trigger: "(?m)^/test (all|pull-knative-docs-go-coverage),?(\\s+|$)" + optional: true + decorate: true + clone_uri: "https://github.com/knative/docs.git" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/coverage:latest + imagePullPolicy: Always + command: + - "/coverage" + args: + - "--postsubmit-gcs-bucket=knative-prow" + - "--postsubmit-job-name=post-knative-docs-go-coverage" + - "--artifacts=$(ARTIFACTS)" + - "--profile-name=coverage_profile.txt" + - "--cov-target=." 
+ - "--cov-threshold-percentage=50" + - "--github-token=/etc/github-token/token" + volumeMounts: + - name: github-token + mountPath: /etc/github-token + readOnly: true + volumes: + - name: github-token + secret: + secretName: covbot-token + + knative/build-templates: + - name: pull-knative-build-templates-build-tests + agent: kubernetes + context: pull-knative-build-templates-build-tests + always_run: true + rerun_command: "/test pull-knative-build-templates-build-tests" + trigger: "(?m)^/test (all|pull-knative-build-templates-build-tests),?(\\s+|$)" + labels: + preset-service-account: "true" + preset-bazel-scratch-dir: "true" + preset-bazel-remote-cache-enabled: "true" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/prow-tests:latest + imagePullPolicy: Always + args: + - "--scenario=kubernetes_execute_bazel" + - "--clean" + - "--job=$(JOB_NAME)" + - "--repo=github.com/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)" + - "--root=/go/src" + - "--service-account=/etc/service-account/service-account.json" + - "--upload=gs://knative-prow/pr-logs" + - "--" # end bootstrap args, scenario args below + - "--" # end kubernetes_execute_bazel flags (consider following flags as text) + - "./test/presubmit-tests.sh" + - "--build-tests" + # Bazel needs privileged mode in order to sandbox builds. 
+ securityContext: + privileged: true + resources: + requests: + memory: "1Gi" + + - name: pull-knative-build-templates-unit-tests + agent: kubernetes + context: pull-knative-build-templates-unit-tests + always_run: true + rerun_command: "/test pull-knative-build-templates-unit-tests" + trigger: "(?m)^/test (all|pull-knative-build-templates-unit-tests),?(\\s+|$)" + labels: + preset-service-account: "true" + preset-bazel-scratch-dir: "true" + preset-bazel-remote-cache-enabled: "true" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/prow-tests:latest + imagePullPolicy: Always + args: + - "--scenario=kubernetes_execute_bazel" + - "--clean" + - "--job=$(JOB_NAME)" + - "--repo=github.com/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)" + - "--root=/go/src" + - "--service-account=/etc/service-account/service-account.json" + - "--upload=gs://knative-prow/pr-logs" + - "--" # end bootstrap args, scenario args below + - "--" # end kubernetes_execute_bazel flags (consider following flags as text) + - "./test/presubmit-tests.sh" + - "--unit-tests" + # Bazel needs privileged mode in order to sandbox builds. 
+ securityContext: + privileged: true + resources: + requests: + memory: "1Gi" + + - name: pull-knative-build-templates-integration-tests + agent: kubernetes + context: pull-knative-build-templates-integration-tests + always_run: true + rerun_command: "/test pull-knative-build-templates-integration-tests" + trigger: "(?m)^/test (all|pull-knative-build-templates-integration-tests),?(\\s+|$)" + labels: + preset-service-account: "true" + preset-bazel-scratch-dir: "true" + preset-bazel-remote-cache-enabled: "true" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/prow-tests:latest + imagePullPolicy: Always + args: + - "--scenario=kubernetes_execute_bazel" + - "--clean" + - "--job=$(JOB_NAME)" + - "--repo=github.com/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)" + - "--root=/go/src" + - "--service-account=/etc/service-account/service-account.json" + - "--upload=gs://knative-prow/pr-logs" + - "--" # end bootstrap args, scenario args below + - "--" # end kubernetes_execute_bazel flags (consider following flags as text) + - "./test/presubmit-tests.sh" + - "--integration-tests" + # Bazel needs privileged mode in order to sandbox builds. 
+ securityContext: + privileged: true + resources: + requests: + memory: "1Gi" + + knative/pkg: + - name: pull-knative-pkg-build-tests + agent: kubernetes + context: pull-knative-pkg-build-tests + always_run: true + rerun_command: "/test pull-knative-pkg-build-tests" + trigger: "(?m)^/test (all|pull-knative-pkg-build-tests),?(\\s+|$)" + labels: + preset-service-account: "true" + preset-bazel-scratch-dir: "true" + preset-bazel-remote-cache-enabled: "true" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/prow-tests:latest + imagePullPolicy: Always + args: + - "--scenario=kubernetes_execute_bazel" + - "--clean" + - "--job=$(JOB_NAME)" + - "--repo=github.com/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)" + - "--root=/go/src" + - "--service-account=/etc/service-account/service-account.json" + - "--upload=gs://knative-prow/pr-logs" + - "--" # end bootstrap args, scenario args below + - "--" # end kubernetes_execute_bazel flags (consider following flags as text) + - "./test/presubmit-tests.sh" + - "--build-tests" + # Bazel needs privileged mode in order to sandbox builds. 
+ securityContext: + privileged: true + resources: + requests: + memory: "1Gi" + + - name: pull-knative-pkg-unit-tests + agent: kubernetes + context: pull-knative-pkg-unit-tests + always_run: true + rerun_command: "/test pull-knative-pkg-unit-tests" + trigger: "(?m)^/test (all|pull-knative-pkg-unit-tests),?(\\s+|$)" + labels: + preset-service-account: "true" + preset-bazel-scratch-dir: "true" + preset-bazel-remote-cache-enabled: "true" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/prow-tests:latest + imagePullPolicy: Always + args: + - "--scenario=kubernetes_execute_bazel" + - "--clean" + - "--job=$(JOB_NAME)" + - "--repo=github.com/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)" + - "--root=/go/src" + - "--service-account=/etc/service-account/service-account.json" + - "--upload=gs://knative-prow/pr-logs" + - "--" # end bootstrap args, scenario args below + - "--" # end kubernetes_execute_bazel flags (consider following flags as text) + - "./test/presubmit-tests.sh" + - "--unit-tests" + # Bazel needs privileged mode in order to sandbox builds. 
+ securityContext: + privileged: true + resources: + requests: + memory: "1Gi" + + - name: pull-knative-pkg-integration-tests + agent: kubernetes + context: pull-knative-pkg-integration-tests + always_run: true + rerun_command: "/test pull-knative-pkg-integration-tests" + trigger: "(?m)^/test (all|pull-knative-pkg-integration-tests),?(\\s+|$)" + labels: + preset-service-account: "true" + preset-bazel-scratch-dir: "true" + preset-bazel-remote-cache-enabled: "true" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/prow-tests:latest + imagePullPolicy: Always + args: + - "--scenario=kubernetes_execute_bazel" + - "--clean" + - "--job=$(JOB_NAME)" + - "--repo=github.com/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)" + - "--root=/go/src" + - "--service-account=/etc/service-account/service-account.json" + - "--upload=gs://knative-prow/pr-logs" + - "--" # end bootstrap args, scenario args below + - "--" # end kubernetes_execute_bazel flags (consider following flags as text) + - "./test/presubmit-tests.sh" + - "--integration-tests" + # Bazel needs privileged mode in order to sandbox builds. + securityContext: + privileged: true + resources: + requests: + memory: "1Gi" + + - name: pull-knative-pkg-go-coverage + labels: + preset-service-account: "true" + agent: kubernetes + context: pull-knative-pkg-go-coverage + always_run: true + rerun_command: "/test pull-knative-pkg-go-coverage" + trigger: "(?m)^/test (all|pull-knative-pkg-go-coverage),?(\\s+|$)" + optional: true + decorate: true + clone_uri: "https://github.com/knative/pkg.git" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/coverage:latest + imagePullPolicy: Always + command: + - "/coverage" + args: + - "--postsubmit-gcs-bucket=knative-prow" + - "--postsubmit-job-name=post-knative-pkg-go-coverage" + - "--artifacts=$(ARTIFACTS)" + - "--profile-name=coverage_profile.txt" + - "--cov-target=." 
+ - "--cov-threshold-percentage=50" + - "--github-token=/etc/github-token/token" + volumeMounts: + - name: github-token + mountPath: /etc/github-token + readOnly: true + volumes: + - name: github-token + secret: + secretName: covbot-token + + knative/test-infra: + - name: pull-knative-test-infra-build-tests + agent: kubernetes + context: pull-knative-test-infra-build-tests + always_run: true + rerun_command: "/test pull-knative-test-infra-build-tests" + trigger: "(?m)^/test (all|pull-knative-test-infra-build-tests),?(\\s+|$)" + labels: + preset-service-account: "true" + preset-bazel-scratch-dir: "true" + preset-bazel-remote-cache-enabled: "true" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/prow-tests:latest + imagePullPolicy: Always + args: + - "--scenario=kubernetes_execute_bazel" + - "--clean" + - "--job=$(JOB_NAME)" + - "--repo=github.com/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)" + - "--root=/go/src" + - "--service-account=/etc/service-account/service-account.json" + - "--upload=gs://knative-prow/pr-logs" + - "--" # end bootstrap args, scenario args below + - "--" # end kubernetes_execute_bazel flags (consider following flags as text) + - "./test/presubmit-tests.sh" + - "--build-tests" + # Bazel needs privileged mode in order to sandbox builds. 
+ securityContext: + privileged: true + resources: + requests: + memory: "1Gi" + + - name: pull-knative-test-infra-unit-tests + agent: kubernetes + context: pull-knative-test-infra-unit-tests + always_run: true + rerun_command: "/test pull-knative-test-infra-unit-tests" + trigger: "(?m)^/test (all|pull-knative-test-infra-unit-tests),?(\\s+|$)" + labels: + preset-service-account: "true" + preset-bazel-scratch-dir: "true" + preset-bazel-remote-cache-enabled: "true" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/prow-tests:latest + imagePullPolicy: Always + args: + - "--scenario=kubernetes_execute_bazel" + - "--clean" + - "--job=$(JOB_NAME)" + - "--repo=github.com/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)" + - "--root=/go/src" + - "--service-account=/etc/service-account/service-account.json" + - "--upload=gs://knative-prow/pr-logs" + - "--" # end bootstrap args, scenario args below + - "--" # end kubernetes_execute_bazel flags (consider following flags as text) + - "./test/presubmit-tests.sh" + - "--unit-tests" + # Bazel needs privileged mode in order to sandbox builds. 
+ securityContext: + privileged: true + resources: + requests: + memory: "1Gi" + + - name: pull-knative-test-infra-integration-tests + agent: kubernetes + context: pull-knative-test-infra-integration-tests + always_run: true + rerun_command: "/test pull-knative-test-infra-integration-tests" + trigger: "(?m)^/test (all|pull-knative-test-infra-integration-tests),?(\\s+|$)" + labels: + preset-service-account: "true" + preset-bazel-scratch-dir: "true" + preset-bazel-remote-cache-enabled: "true" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/prow-tests:latest + imagePullPolicy: Always + args: + - "--scenario=kubernetes_execute_bazel" + - "--clean" + - "--job=$(JOB_NAME)" + - "--repo=github.com/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)" + - "--root=/go/src" + - "--service-account=/etc/service-account/service-account.json" + - "--upload=gs://knative-prow/pr-logs" + - "--" # end bootstrap args, scenario args below + - "--" # end kubernetes_execute_bazel flags (consider following flags as text) + - "./test/presubmit-tests.sh" + - "--integration-tests" + # Bazel needs privileged mode in order to sandbox builds. 
+ securityContext: + privileged: true + resources: + requests: + memory: "1Gi" + + knative/caching: + - name: pull-knative-caching-build-tests + agent: kubernetes + context: pull-knative-caching-build-tests + always_run: true + rerun_command: "/test pull-knative-caching-build-tests" + trigger: "(?m)^/test (all|pull-knative-caching-build-tests),?(\\s+|$)" + labels: + preset-service-account: "true" + preset-bazel-scratch-dir: "true" + preset-bazel-remote-cache-enabled: "true" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/prow-tests:latest + imagePullPolicy: Always + args: + - "--scenario=kubernetes_execute_bazel" + - "--clean" + - "--job=$(JOB_NAME)" + - "--repo=github.com/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)" + - "--root=/go/src" + - "--service-account=/etc/service-account/service-account.json" + - "--upload=gs://knative-prow/pr-logs" + - "--" # end bootstrap args, scenario args below + - "--" # end kubernetes_execute_bazel flags (consider following flags as text) + - "./test/presubmit-tests.sh" + - "--build-tests" + # Bazel needs privileged mode in order to sandbox builds. 
+ securityContext: + privileged: true + resources: + requests: + memory: "1Gi" + + - name: pull-knative-caching-unit-tests + agent: kubernetes + context: pull-knative-caching-unit-tests + always_run: true + rerun_command: "/test pull-knative-caching-unit-tests" + trigger: "(?m)^/test (all|pull-knative-caching-unit-tests),?(\\s+|$)" + labels: + preset-service-account: "true" + preset-bazel-scratch-dir: "true" + preset-bazel-remote-cache-enabled: "true" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/prow-tests:latest + imagePullPolicy: Always + args: + - "--scenario=kubernetes_execute_bazel" + - "--clean" + - "--job=$(JOB_NAME)" + - "--repo=github.com/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)" + - "--root=/go/src" + - "--service-account=/etc/service-account/service-account.json" + - "--upload=gs://knative-prow/pr-logs" + - "--" # end bootstrap args, scenario args below + - "--" # end kubernetes_execute_bazel flags (consider following flags as text) + - "./test/presubmit-tests.sh" + - "--unit-tests" + # Bazel needs privileged mode in order to sandbox builds. 
+ securityContext: + privileged: true + resources: + requests: + memory: "1Gi" + + - name: pull-knative-caching-integration-tests + agent: kubernetes + context: pull-knative-caching-integration-tests + always_run: true + rerun_command: "/test pull-knative-caching-integration-tests" + trigger: "(?m)^/test (all|pull-knative-caching-integration-tests),?(\\s+|$)" + labels: + preset-service-account: "true" + preset-bazel-scratch-dir: "true" + preset-bazel-remote-cache-enabled: "true" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/prow-tests:latest + imagePullPolicy: Always + args: + - "--scenario=kubernetes_execute_bazel" + - "--clean" + - "--job=$(JOB_NAME)" + - "--repo=github.com/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)" + - "--root=/go/src" + - "--service-account=/etc/service-account/service-account.json" + - "--upload=gs://knative-prow/pr-logs" + - "--" # end bootstrap args, scenario args below + - "--" # end kubernetes_execute_bazel flags (consider following flags as text) + - "./test/presubmit-tests.sh" + - "--integration-tests" + # Bazel needs privileged mode in order to sandbox builds. + securityContext: + privileged: true + resources: + requests: + memory: "1Gi" + + - name: pull-knative-caching-go-coverage + labels: + preset-service-account: "true" + agent: kubernetes + context: pull-knative-caching-go-coverage + always_run: true + rerun_command: "/test pull-knative-caching-go-coverage" + trigger: "(?m)^/test (all|pull-knative-caching-go-coverage),?(\\s+|$)" + optional: true + decorate: true + clone_uri: "https://github.com/knative/caching.git" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/coverage:latest + imagePullPolicy: Always + command: + - "/coverage" + args: + - "--postsubmit-gcs-bucket=knative-prow" + - "--postsubmit-job-name=post-knative-caching-go-coverage" + - "--artifacts=$(ARTIFACTS)" + - "--profile-name=coverage_profile.txt" + - "--cov-target=." 
+ - "--cov-threshold-percentage=50" + - "--github-token=/etc/github-token/token" + volumeMounts: + - name: github-token + mountPath: /etc/github-token + readOnly: true + volumes: + - name: github-token + secret: + secretName: covbot-token + +periodics: +- cron: "1 * * * *" # Run every hour and one minute + name: ci-knative-serving-continuous + agent: kubernetes + labels: + preset-service-account: "true" + preset-bazel-scratch-dir: "true" + preset-bazel-remote-cache-enabled: "true" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/prow-tests:latest + imagePullPolicy: Always + args: + - "--scenario=kubernetes_execute_bazel" + - "--clean" + - "--job=$(JOB_NAME)" + - "--repo=github.com/knative/serving" + - "--root=/go/src" + - "--service-account=/etc/service-account/service-account.json" + - "--upload=gs://knative-prow/logs" + - "--timeout=50" # Avoid overrun + - "--" # end bootstrap args, scenario args below + - "--" # end kubernetes_execute_bazel flags (consider following flags as text) + - "./test/presubmit-tests.sh" + - "--all-tests" + - "--emit-metrics" + # Bazel needs privileged mode in order to sandbox builds. 
+ securityContext: + privileged: true + resources: + requests: + memory: "1Gi" +- cron: "1 8 * * *" # Run at 01:01PST every day (08:01 UTC) + name: ci-knative-serving-release + agent: kubernetes + labels: + preset-service-account: "true" + preset-bazel-scratch-dir: "true" + preset-bazel-remote-cache-enabled: "true" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/prow-tests:latest + imagePullPolicy: Always + args: + - "--scenario=kubernetes_execute_bazel" + - "--clean" + - "--job=$(JOB_NAME)" + - "--repo=github.com/knative/serving" + - "--root=/go/src" + - "--service-account=/etc/service-account/service-account.json" + - "--upload=gs://knative-prow/logs" + - "--timeout=90" # 1.5h + - "--" # end bootstrap args, scenario args below + - "--" # end kubernetes_execute_bazel flags (consider following flags as text) + - "./hack/release.sh" + - "--publish" + - "--tag-release" + # Bazel needs privileged mode in order to sandbox builds. + securityContext: + privileged: true + resources: + requests: + memory: "1Gi" +- cron: "1 9 * * 6" # Run at 02:01PST every Saturday (09:01 UTC) + name: ci-knative-serving-playground + agent: kubernetes + labels: + preset-service-account: "true" + preset-bazel-scratch-dir: "true" + preset-bazel-remote-cache-enabled: "true" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/prow-tests:latest + imagePullPolicy: Always + args: + - "--scenario=kubernetes_execute_bazel" + - "--clean" + - "--job=$(JOB_NAME)" + - "--repo=github.com/knative/serving" + - "--root=/go/src" + - "--service-account=/etc/service-account/service-account.json" + - "--upload=gs://knative-prow/logs" + - "--timeout=90" # 1.5h + - "--" # end bootstrap args, scenario args below + - "--" # end kubernetes_execute_bazel flags (consider following flags as text) + - "./hack/deploy.sh" + - "knative-playground" + # Bazel needs privileged mode in order to sandbox builds. 
+ securityContext: + privileged: true + resources: + requests: + memory: "1Gi" +- cron: "5 8 * * *" # Run at 01:05PST every day (08:05 UTC) + name: ci-knative-serving-latency + agent: kubernetes + labels: + preset-service-account: "true" + decorate: true + extra_refs: + - org: knative + repo: serving + base_ref: master + clone_uri: "https://github.com/knative/serving.git" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/metrics:latest + imagePullPolicy: Always + command: + - "/metrics" + args: + - "--source-directory=ci-knative-serving-continuous" + - "--artifacts-dir=$(ARTIFACTS)" + - "--service-account=/etc/service-account/service-account.json" +- cron: "5 8 * * *" # Run at 01:05PST every day (08:05 UTC) + name: ci-knative-serving-api-coverage + agent: kubernetes + labels: + preset-service-account: "true" + decorate: true + extra_refs: + - org: knative + repo: serving + base_ref: master + clone_uri: "https://github.com/knative/serving.git" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/apicoverage:latest + imagePullPolicy: Always + command: + - "/apicoverage" + args: + - "--artifacts-dir=$(ARTIFACTS)" + - "--service-account=/etc/service-account/service-account.json" +- cron: "0 1 * * *" # Run at 01:00 every day + name: ci-knative-serving-go-coverage + agent: kubernetes + decorate: true + extra_refs: + - org: knative + repo: serving + base_ref: master + clone_uri: "https://github.com/knative/serving.git" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/coverage:latest + imagePullPolicy: Always + command: + - "/coverage" + args: + - "--artifacts=$(ARTIFACTS)" + - "--profile-name=coverage_profile.txt" + - "--cov-target=." 
+ - "--cov-threshold-percentage=80" +- cron: "0 1 * * *" # Run at 01:00 every day + name: ci-knative-serving-performance + agent: kubernetes + labels: + preset-service-account: "true" + preset-bazel-scratch-dir: "true" + preset-bazel-remote-cache-enabled: "true" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/prow-tests:latest + imagePullPolicy: Always + args: + - "--scenario=kubernetes_execute_bazel" + - "--clean" + - "--job=$(JOB_NAME)" + - "--repo=github.com/knative/serving" + - "--root=/go/src" + - "--service-account=/etc/service-account/service-account.json" + - "--upload=gs://knative-prow/logs" + - "--timeout=50" # Avoid overrun + - "--" # end bootstrap args, scenario args below + - "--" # end kubernetes_execute_bazel flags (consider following flags as text) + - "./test/performance-tests.sh" + # Bazel needs privileged mode in order to sandbox builds. + securityContext: + privileged: true + resources: + requests: + memory: "1Gi" + +- cron: "15 * * * *" # Run every hour and 15 minutes + name: ci-knative-build-continuous + agent: kubernetes + labels: + preset-service-account: "true" + preset-bazel-scratch-dir: "true" + preset-bazel-remote-cache-enabled: "true" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/prow-tests:latest + imagePullPolicy: Always + args: + - "--scenario=kubernetes_execute_bazel" + - "--clean" + - "--job=$(JOB_NAME)" + - "--repo=github.com/knative/build" + - "--root=/go/src" + - "--service-account=/etc/service-account/service-account.json" + - "--upload=gs://knative-prow/logs" + - "--timeout=50" # Avoid overrun + - "--" # end bootstrap args, scenario args below + - "--" # end kubernetes_execute_bazel flags (consider following flags as text) + - "./test/presubmit-tests.sh" + - "--all-tests" + - "--emit-metrics" + # Bazel needs privileged mode in order to sandbox builds. 
+ securityContext: + privileged: true + resources: + requests: + memory: "1Gi" +- cron: "31 8 * * *" # Run at 01:31PST every day (08:31 UTC) + name: ci-knative-build-release + agent: kubernetes + labels: + preset-service-account: "true" + preset-bazel-scratch-dir: "true" + preset-bazel-remote-cache-enabled: "true" + preset-dind-enabled: "true" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/prow-tests:latest + imagePullPolicy: Always + args: + - "--scenario=kubernetes_execute_bazel" + - "--clean" + - "--job=$(JOB_NAME)" + - "--repo=github.com/knative/build" + - "--root=/go/src" + - "--service-account=/etc/service-account/service-account.json" + - "--upload=gs://knative-prow/logs" + - "--timeout=90" # 1.5h + - "--" # end bootstrap args, scenario args below + - "--" # end kubernetes_execute_bazel flags (consider following flags as text) + - "./hack/release.sh" + - "--publish" + - "--tag-release" + # Bazel needs privileged mode in order to sandbox builds. + securityContext: + privileged: true + resources: + requests: + memory: "1Gi" +- cron: "5 8 * * *" # Run at 01:05PST every day (08:05 UTC) + name: ci-knative-build-latency + agent: kubernetes + labels: + preset-service-account: "true" + decorate: true + extra_refs: + - org: knative + repo: build + base_ref: master + clone_uri: "https://github.com/knative/build.git" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/metrics:latest + imagePullPolicy: Always + command: + - "/metrics" + args: + - "--source-directory=ci-knative-build-continuous" + - "--artifacts-dir=$(ARTIFACTS)" + - "--service-account=/etc/service-account/service-account.json" +- cron: "0 1 * * *" # Run at 01:00 every day + name: ci-knative-build-go-coverage + agent: kubernetes + decorate: true + extra_refs: + - org: knative + repo: build + base_ref: master + clone_uri: "https://github.com/knative/build.git" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/coverage:latest + imagePullPolicy: Always + 
command: + - "/coverage" + args: + - "--artifacts=$(ARTIFACTS)" + - "--profile-name=coverage_profile.txt" + - "--cov-target=." + - "--cov-threshold-percentage=80" + +- cron: "50 * * * *" # Run every hour and 50 minutes + name: ci-knative-docs-continuous + agent: kubernetes + labels: + preset-service-account: "true" + preset-bazel-scratch-dir: "true" + preset-bazel-remote-cache-enabled: "true" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/prow-tests:latest + imagePullPolicy: Always + args: + - "--scenario=kubernetes_execute_bazel" + - "--clean" + - "--job=$(JOB_NAME)" + - "--repo=github.com/knative/docs" + - "--root=/go/src" + - "--service-account=/etc/service-account/service-account.json" + - "--upload=gs://knative-prow/logs" + - "--timeout=50" # Avoid overrun + - "--" # end bootstrap args, scenario args below + - "--" # end kubernetes_execute_bazel flags (consider following flags as text) + - "./test/presubmit-tests.sh" + - "--all-tests" + # Bazel needs privileged mode in order to sandbox builds. + securityContext: + privileged: true + resources: + requests: + memory: "1Gi" +- cron: "0 1 * * *" # Run at 01:00 every day + name: ci-knative-docs-go-coverage + agent: kubernetes + decorate: true + extra_refs: + - org: knative + repo: docs + base_ref: master + clone_uri: "https://github.com/knative/docs.git" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/coverage:latest + imagePullPolicy: Always + command: + - "/coverage" + args: + - "--artifacts=$(ARTIFACTS)" + - "--profile-name=coverage_profile.txt" + - "--cov-target=." 
+ - "--cov-threshold-percentage=80" + +- cron: "30 * * * *" # Run every hour and 30 minutes + name: ci-knative-eventing-continuous + agent: kubernetes + labels: + preset-service-account: "true" + preset-bazel-scratch-dir: "true" + preset-bazel-remote-cache-enabled: "true" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/prow-tests:latest + imagePullPolicy: Always + args: + - "--scenario=kubernetes_execute_bazel" + - "--clean" + - "--job=$(JOB_NAME)" + - "--repo=github.com/knative/eventing" + - "--root=/go/src" + - "--service-account=/etc/service-account/service-account.json" + - "--upload=gs://knative-prow/logs" + - "--timeout=50" # Avoid overrun + - "--" # end bootstrap args, scenario args below + - "--" # end kubernetes_execute_bazel flags (consider following flags as text) + - "./test/presubmit-tests.sh" + - "--all-tests" + # Bazel needs privileged mode in order to sandbox builds. + securityContext: + privileged: true + resources: + requests: + memory: "1Gi" +- cron: "16 9 * * *" # Run at 02:16PST every day (09:16 UTC) + name: ci-knative-eventing-release + agent: kubernetes + labels: + preset-service-account: "true" + preset-bazel-scratch-dir: "true" + preset-bazel-remote-cache-enabled: "true" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/prow-tests:latest + imagePullPolicy: Always + args: + - "--scenario=kubernetes_execute_bazel" + - "--clean" + - "--job=$(JOB_NAME)" + - "--repo=github.com/knative/eventing" + - "--root=/go/src" + - "--service-account=/etc/service-account/service-account.json" + - "--upload=gs://knative-prow/logs" + - "--timeout=90" # 1.5h + - "--" # end bootstrap args, scenario args below + - "--" # end kubernetes_execute_bazel flags (consider following flags as text) + - "./hack/release.sh" + - "--publish" + - "--tag-release" + # Bazel needs privileged mode in order to sandbox builds. 
+ securityContext: + privileged: true + resources: + requests: + memory: "1Gi" +- cron: "0 1 * * *" # Run at 01:00 every day + name: ci-knative-eventing-go-coverage + agent: kubernetes + decorate: true + extra_refs: + - org: knative + repo: eventing + base_ref: master + clone_uri: "https://github.com/knative/eventing.git" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/coverage:latest + imagePullPolicy: Always + command: + - "/coverage" + args: + - "--artifacts=$(ARTIFACTS)" + - "--profile-name=coverage_profile.txt" + - "--cov-target=." + - "--cov-threshold-percentage=80" + +- cron: "30 * * * *" # Run every hour and 30 minutes + name: ci-knative-eventing-sources-continuous + agent: kubernetes + labels: + preset-service-account: "true" + preset-bazel-scratch-dir: "true" + preset-bazel-remote-cache-enabled: "true" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/prow-tests:latest + imagePullPolicy: Always + args: + - "--scenario=kubernetes_execute_bazel" + - "--clean" + - "--job=$(JOB_NAME)" + - "--repo=github.com/knative/eventing-sources" + - "--root=/go/src" + - "--service-account=/etc/service-account/service-account.json" + - "--upload=gs://knative-prow/logs" + - "--timeout=50" # Avoid overrun + - "--" # end bootstrap args, scenario args below + - "--" # end kubernetes_execute_bazel flags (consider following flags as text) + - "./test/presubmit-tests.sh" + - "--all-tests" + # Bazel needs privileged mode in order to sandbox builds. 
+ securityContext: + privileged: true + resources: + requests: + memory: "1Gi" +- cron: "16 9 * * *" # Run at 02:16PST every day (09:16 UTC) + name: ci-knative-eventing-sources-release + agent: kubernetes + labels: + preset-service-account: "true" + preset-bazel-scratch-dir: "true" + preset-bazel-remote-cache-enabled: "true" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/prow-tests:latest + imagePullPolicy: Always + args: + - "--scenario=kubernetes_execute_bazel" + - "--clean" + - "--job=$(JOB_NAME)" + - "--repo=github.com/knative/eventing-sources" + - "--root=/go/src" + - "--service-account=/etc/service-account/service-account.json" + - "--upload=gs://knative-prow/logs" + - "--timeout=90" # 1.5h + - "--" # end bootstrap args, scenario args below + - "--" # end kubernetes_execute_bazel flags (consider following flags as text) + - "./hack/release.sh" + - "--publish" + - "--tag-release" + # Bazel needs privileged mode in order to sandbox builds. + securityContext: + privileged: true + resources: + requests: + memory: "1Gi" +- cron: "0 1 * * *" # Run at 01:00 every day + name: ci-knative-eventing-sources-go-coverage + agent: kubernetes + decorate: true + extra_refs: + - org: knative + repo: eventing-sources + base_ref: master + clone_uri: "https://github.com/knative/eventing-sources.git" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/coverage:latest + imagePullPolicy: Always + command: + - "/coverage" + args: + - "--artifacts=$(ARTIFACTS)" + - "--profile-name=coverage_profile.txt" + - "--cov-target=." 
+ - "--cov-threshold-percentage=80" + +- cron: "40 * * * *" # Run every hour and 40 minutes + name: ci-knative-build-templates-continuous + agent: kubernetes + labels: + preset-service-account: "true" + preset-bazel-scratch-dir: "true" + preset-bazel-remote-cache-enabled: "true" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/prow-tests:latest + imagePullPolicy: Always + args: + - "--scenario=kubernetes_execute_bazel" + - "--clean" + - "--job=$(JOB_NAME)" + - "--repo=github.com/knative/build-templates" + - "--root=/go/src" + - "--service-account=/etc/service-account/service-account.json" + - "--upload=gs://knative-prow/logs" + - "--timeout=50" # Avoid overrun + - "--" # end bootstrap args, scenario args below + - "--" # end kubernetes_execute_bazel flags (consider following flags as text) + - "./test/presubmit-tests.sh" + - "--all-tests" + # Bazel needs privileged mode in order to sandbox builds. + securityContext: + privileged: true + resources: + requests: + memory: "1Gi" + +- cron: "45 * * * *" # Run every hour and 45 minutes + name: ci-knative-pkg-continuous + agent: kubernetes + labels: + preset-service-account: "true" + preset-bazel-scratch-dir: "true" + preset-bazel-remote-cache-enabled: "true" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/prow-tests:latest + imagePullPolicy: Always + args: + - "--scenario=kubernetes_execute_bazel" + - "--clean" + - "--job=$(JOB_NAME)" + - "--repo=github.com/knative/pkg" + - "--root=/go/src" + - "--service-account=/etc/service-account/service-account.json" + - "--upload=gs://knative-prow/logs" + - "--timeout=50" # Avoid overrun + - "--" # end bootstrap args, scenario args below + - "--" # end kubernetes_execute_bazel flags (consider following flags as text) + - "./test/presubmit-tests.sh" + - "--all-tests" + # Bazel needs privileged mode in order to sandbox builds. 
+ securityContext: + privileged: true + resources: + requests: + memory: "1Gi" +- cron: "0 1 * * *" # Run at 01:00 every day + name: ci-knative-pkg-go-coverage + agent: kubernetes + decorate: true + extra_refs: + - org: knative + repo: pkg + base_ref: master + clone_uri: "https://github.com/knative/pkg.git" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/coverage:latest + imagePullPolicy: Always + command: + - "/coverage" + args: + - "--artifacts=$(ARTIFACTS)" + - "--profile-name=coverage_profile.txt" + - "--cov-target=." + - "--cov-threshold-percentage=80" + +- cron: "30 * * * *" # Run every hour and 30 minutes + name: ci-knative-caching-continuous + agent: kubernetes + labels: + preset-service-account: "true" + preset-bazel-scratch-dir: "true" + preset-bazel-remote-cache-enabled: "true" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/prow-tests:latest + imagePullPolicy: Always + args: + - "--scenario=kubernetes_execute_bazel" + - "--clean" + - "--job=$(JOB_NAME)" + - "--repo=github.com/knative/caching" + - "--root=/go/src" + - "--service-account=/etc/service-account/service-account.json" + - "--upload=gs://knative-prow/logs" + - "--timeout=50" # Avoid overrun + - "--" # end bootstrap args, scenario args below + - "--" # end kubernetes_execute_bazel flags (consider following flags as text) + - "./test/presubmit-tests.sh" + - "--all-tests" + # Bazel needs privileged mode in order to sandbox builds. 
+ securityContext: + privileged: true + resources: + requests: + memory: "1Gi" +- cron: "0 1 * * *" # Run at 01:00 every day + name: ci-knative-caching-go-coverage + agent: kubernetes + decorate: true + extra_refs: + - org: knative + repo: caching + base_ref: master + clone_uri: "https://github.com/knative/caching.git" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/coverage:latest + imagePullPolicy: Always + command: + - "/coverage" + args: + - "--artifacts=$(ARTIFACTS)" + - "--profile-name=coverage_profile.txt" + - "--cov-target=." + - "--cov-threshold-percentage=80" + +postsubmits: + knative/serving: + - name: post-knative-serving-go-coverage + branches: + - master + agent: kubernetes + decorate: true + clone_uri: "https://github.com/knative/serving.git" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/coverage:latest + imagePullPolicy: Always + command: + - "/coverage" + args: + - "--artifacts=$(ARTIFACTS)" + - "--profile-name=coverage_profile.txt" + - "--cov-target=." + - "--cov-threshold-percentage=0" + - name: post-knative-serving-go-coverage-dev + branches: + - master + agent: kubernetes + decorate: true + clone_uri: "https://github.com/knative/serving.git" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/coverage-dev:latest-dev + imagePullPolicy: Always + command: + - "/coverage" + args: + - "--artifacts=$(ARTIFACTS)" + - "--profile-name=coverage_profile.txt" + - "--cov-target=." + - "--cov-threshold-percentage=0" + + knative/build: + - name: post-knative-build-go-coverage + branches: + - master + agent: kubernetes + decorate: true + clone_uri: "https://github.com/knative/build.git" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/coverage:latest + imagePullPolicy: Always + command: + - "/coverage" + args: + - "--artifacts=$(ARTIFACTS)" + - "--profile-name=coverage_profile.txt" + - "--cov-target=." 
+ - "--cov-threshold-percentage=0" + + knative/docs: + - name: post-knative-docs-go-coverage + branches: + - master + agent: kubernetes + decorate: true + clone_uri: "https://github.com/knative/docs.git" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/coverage:latest + imagePullPolicy: Always + command: + - "/coverage" + args: + - "--artifacts=$(ARTIFACTS)" + - "--profile-name=coverage_profile.txt" + - "--cov-target=." + - "--cov-threshold-percentage=0" + + knative/eventing: + - name: post-knative-eventing-go-coverage + branches: + - master + agent: kubernetes + decorate: true + clone_uri: "https://github.com/knative/eventing.git" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/coverage:latest + imagePullPolicy: Always + command: + - "/coverage" + args: + - "--artifacts=$(ARTIFACTS)" + - "--profile-name=coverage_profile.txt" + - "--cov-target=." + - "--cov-threshold-percentage=0" + + knative/eventing-sources: + - name: post-knative-eventing-sources-go-coverage + branches: + - master + agent: kubernetes + decorate: true + clone_uri: "https://github.com/knative/eventing-sources.git" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/coverage:latest + imagePullPolicy: Always + command: + - "/coverage" + args: + - "--artifacts=$(ARTIFACTS)" + - "--profile-name=coverage_profile.txt" + - "--cov-target=." + - "--cov-threshold-percentage=0" + + knative/pkg: + - name: post-knative-pkg-go-coverage + branches: + - master + agent: kubernetes + decorate: true + clone_uri: "https://github.com/knative/pkg.git" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/coverage:latest + imagePullPolicy: Always + command: + - "/coverage" + args: + - "--artifacts=$(ARTIFACTS)" + - "--profile-name=coverage_profile.txt" + - "--cov-target=." 
+ - "--cov-threshold-percentage=0" + + knative/caching: + - name: post-knative-caching-go-coverage + branches: + - master + agent: kubernetes + decorate: true + clone_uri: "https://github.com/knative/caching.git" + spec: + containers: + - image: gcr.io/knative-tests/test-infra/coverage:latest + imagePullPolicy: Always + command: + - "/coverage" + args: + - "--artifacts=$(ARTIFACTS)" + - "--profile-name=coverage_profile.txt" + - "--cov-target=." + - "--cov-threshold-percentage=0" + diff --git a/vendor/github.com/knative/test-infra/ci/prow/config_start.yaml b/vendor/github.com/knative/test-infra/ci/prow/config_start.yaml new file mode 100644 index 00000000000..ada1a3e62fa --- /dev/null +++ b/vendor/github.com/knative/test-infra/ci/prow/config_start.yaml @@ -0,0 +1,339 @@ +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Initial configuration of prow cluster + +# Configs + +apiVersion: v1 +kind: ConfigMap +metadata: + name: plugins +data: + plugins: "" +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: config +data: + config: "" +--- + +# Namespaces + +apiVersion: v1 +kind: Namespace +metadata: + name: prow +--- +apiVersion: v1 +kind: Namespace +metadata: + name: test-pods +--- + +# Service accounts, roles and bindings + +kind: ServiceAccount +apiVersion: v1 +metadata: + name: "boskos" + namespace: test-pods +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: "boskos" + namespace: test-pods +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: boskos +subjects: +- kind: ServiceAccount + name: "boskos" + namespace: test-pods +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: "boskos" + namespace: test-pods +rules: + - apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - create + - apiGroups: + - boskos.k8s.io + resources: + - resources + verbs: + - "*" +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: "default" + namespace: test-pods +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: "test-pods-default" + namespace: test-pods +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: "test-pods-default" +subjects: +- kind: ServiceAccount + name: "default" + namespace: test-pods +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: "test-pods-default" + namespace: test-pods +rules: + - apiGroups: + - "" + resources: + - pods + verbs: + - get +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: "deck" +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: "deck" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: "deck" +subjects: +- kind: ServiceAccount + name: "deck" + namespace: 
default +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: "deck" +rules: + - apiGroups: + - "" + resources: + - pods/log + verbs: + - get + - apiGroups: + - "prow.k8s.io" + resources: + - prowjobs + verbs: + - get + - list +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: "horologium" +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: "horologium" +rules: + - apiGroups: + - "prow.k8s.io" + resources: + - prowjobs + verbs: + - create + - list +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: "horologium" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: "horologium" +subjects: +- kind: ServiceAccount + name: "horologium" + namespace: default +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: "plank" +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: "plank" +rules: + - apiGroups: + - "" + resources: + - pods + verbs: + - create + - delete + - list + - apiGroups: + - "prow.k8s.io" + resources: + - prowjobs + verbs: + - create + - list + - update +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: "plank" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: "plank" +subjects: +- kind: ServiceAccount + name: "plank" + namespace: default +--- +kind: ServiceAccount +apiVersion: v1 +metadata: + name: "sinker" +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: "sinker" +rules: + - apiGroups: + - "" + resources: + - pods + verbs: + - delete + - list + - apiGroups: + - "prow.k8s.io" + resources: + - prowjobs + verbs: + - delete + - list +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: "sinker" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: "sinker" +subjects: +- kind: ServiceAccount + name: "sinker" + namespace: default +--- +apiVersion: v1 +kind: 
ServiceAccount +metadata: + name: "hook" +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: "hook" +rules: + - apiGroups: + - "prow.k8s.io" + resources: + - prowjobs + verbs: + - create + - get + - apiGroups: + - "" + resources: + - configmaps + verbs: + - update +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: "hook" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: "hook" +subjects: +- kind: ServiceAccount + name: "hook" +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: "tide" +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: "tide" +rules: + - apiGroups: + - "prow.k8s.io" + resources: + - prowjobs + verbs: + - create + - get + - list + - apiGroups: + - "" + resources: + - configmaps + verbs: + - update +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: "tide" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: "tide" +subjects: +- kind: ServiceAccount + name: "tide" + namespace: default diff --git a/vendor/github.com/knative/test-infra/ci/prow/plugins.yaml b/vendor/github.com/knative/test-infra/ci/prow/plugins.yaml new file mode 100644 index 00000000000..57c8b2f4079 --- /dev/null +++ b/vendor/github.com/knative/test-infra/ci/prow/plugins.yaml @@ -0,0 +1,41 @@ +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +approve: +- repos: + - knative + implicit_self_approve: true + review_acts_as_approve: true + +plugins: + knative: + - approve + - assign + - blunderbuss + - buildifier + - cat + - dog + - golint + - heart + - help + - hold + - label + - lgtm + - lifecycle + - shrug + - size + - skip + - trigger + - wip + - yuks diff --git a/vendor/github.com/knative/test-infra/ci/prow/prow_setup.md b/vendor/github.com/knative/test-infra/ci/prow/prow_setup.md new file mode 100644 index 00000000000..3f04729dfb5 --- /dev/null +++ b/vendor/github.com/knative/test-infra/ci/prow/prow_setup.md @@ -0,0 +1,71 @@ +# Prow setup + +## Creating the cluster + +1. Create the GKE cluster, the role bindings and the GitHub secrets. You might need to update [Makefile](./Makefile). For details, see https://github.com/kubernetes/test-infra/blob/master/prow/getting_started.md. + +1. Ensure the GCP projects listed in [resources.yaml](./boskos/resources.yaml) are created. + +1. Apply [config_start.yaml](./config_start.yaml) to the cluster. + +1. Apply Boskos [config_start.yaml](./boskos/config_start.yaml) to the cluster. + +1. Run `make update-cluster`, `make update-boskos`, `make update-config`, `make update-plugins` and `make update-boskos-config`. + +1. If SSL needs to be reconfigured, promote your ingress IP to static in Cloud Console, and [create the TLS secret](https://kubernetes.io/docs/concepts/services-networking/ingress/#tls). + +## Expanding Boskos pool + +1. Create a new GCP project and add it to [resources.yaml](./boskos/resources.yaml). + +1. Make `knative-tests@appspot.gserviceaccount.com` an editor of the project. + +1. Enable the Compute Engine API for the project (e.g., by visiting https://console.developers.google.com/apis/api/compute.googleapis.com/overview?project=XXXXXXXX). + +1. Enable the Kubernetes Engine API for the project (e.g., by visiting https://console.cloud.google.com/apis/api/container.googleapis.com/overview?project=XXXXXXXX). + +1. Run `make update-boskos-config`. 
+ +## Setting up Prow for a new repo + +1. Create the appropriate `OWNERS` files (at least one for the root dir). + +1. Make sure that *Knative Robots* is an Admin of the repo. + +1. Update the tide section in the Prow config file and run `make update-config` (ask one of the owners of knative/test-infra). + +1. Wait a few minutes, check that Prow is working by entering `/woof` as a comment in any PR in the new repo. + +1. Set **tide** as a required status check for the master branch. + +### Setting up jobs for a new repo + +1. Have the test infrastructure in place (usually this means having at least `//test/presubmit-tests.sh` working, and optionally `//hack/release.sh` working for automated nightly releases). + +1. Merge a pull request (e.g., https://github.com/knative/test-infra/pull/203) that: + + 1. Updates the Prow config file (usually, copy and update existing jobs from another repository). + + 1. For the presubmit tests, setup the *pull-knative-**repo**-**(build|unit|integration)**-tests* jobs. + + 1. For go test coverage, setup the ***(pull|post|ci)**-knative-**repo**-go-coverage* jobs. + + 1. For the continuous integration tests, setup the *ci-knative-**repo**-continuous* job. + + 1. For automated nightly releases, setup the *ci-knative-**repo**-release* job. + + 1. Updates the Gubernator config with the new log dirs. + + 1. Updates the Testgrid config with the new buckets, tabs and dashboard. + +1. Ask one of the owners of *knative/test-infra* to: + + 1. Run `make update-config` in `ci/prow`. + + 1. Run `make deploy` in `ci/gubernator`. + + 1. Run `make update-config` in `ci/testgrid`. + +1. Wait a few minutes, enter `/retest` as a comment in any PR in the repo and ensure the test jobs are executed. + +1. Set the new test jobs as required status checks for the master branch. 
diff --git a/vendor/github.com/knative/test-infra/ci/testgrid/Makefile b/vendor/github.com/knative/test-infra/ci/testgrid/Makefile new file mode 100644 index 00000000000..5cf42d995b9 --- /dev/null +++ b/vendor/github.com/knative/test-infra/ci/testgrid/Makefile @@ -0,0 +1,29 @@ +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +TESTGRID_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST)))) + +test: + bazel run @k8s//testgrid/cmd/configurator -- \ + --yaml=$(TESTGRID_DIR)/config.yaml \ + --validate-config-file + +update-config: +ifndef GOOGLE_APPLICATION_CREDENTIALS + $(error GOOGLE_APPLICATION_CREDENTIALS not set) +endif + bazel run @k8s//testgrid/cmd/configurator -- \ + --yaml=$(TESTGRID_DIR)/config.yaml \ + --output=gs://knative-testgrid/config \ + --oneshot diff --git a/vendor/github.com/knative/test-infra/ci/testgrid/README.md b/vendor/github.com/knative/test-infra/ci/testgrid/README.md new file mode 100644 index 00000000000..7b028e040f3 --- /dev/null +++ b/vendor/github.com/knative/test-infra/ci/testgrid/README.md @@ -0,0 +1,6 @@ +# Testgrid config + +This directory contains the config for our [Testgrid](https://github.com/kubernetes/test-infra/tree/master/testgrid) instance. + +* `Makefile` Commands to interact with the Testgrid instance regarding updates. +* `config.yaml` Testgrid configuration. 
diff --git a/vendor/github.com/knative/test-infra/ci/testgrid/config.yaml b/vendor/github.com/knative/test-infra/ci/testgrid/config.yaml new file mode 100644 index 00000000000..60a5958eb09 --- /dev/null +++ b/vendor/github.com/knative/test-infra/ci/testgrid/config.yaml @@ -0,0 +1,213 @@ +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Default testgroup and dashboardtab, please do not change them +default_test_group: + days_of_results: 14 # Number of days of test results to gather and serve + tests_name_policy: 2 # Replace the name of the test + ignore_pending: false # Show in-progress tests + column_header: + - configuration_value: Commit # Shows the commit number on column header + - configuration_value: infra-commit + num_columns_recent: 10 # The number of columns to consider "recent" for a variety of purposes + use_kubernetes_client: true # ** This field is deprecated and should always be true ** + is_external: true # ** This field is deprecated and should always be true ** + alert_stale_results_hours: 0 # Don't alert for staleness by default + num_failures_to_alert: 3 # Consider a test failed if it has 3 or more consecutive failures + num_passes_to_disable_alert: 1 # Consider a failing test passing if it has 1 or more consecutive passes + +default_dashboard_tab: + open_test_template: # The URL template to visit after clicking on a cell + url: https://gubernator.knative.dev/build// + file_bug_template: # The URL template to 
visit when filing a bug + url: https://github.com/knative/serving/issues/new + options: + - key: title + value: 'Test "" failed' + - key: body + value: + attach_bug_template: # The URL template to visit when attaching a bug + url: # Empty + options: # Empty + # Text to show in the about menu as a link to another view of the results + results_text: See these results in Gubernator + results_url_template: # The URL template to visit after clicking + url: https://gubernator.knative.dev/builds/ + # URL for regression search links. + code_search_path: github.com/knative/serving/search + num_columns_recent: 10 + code_search_url_template: # The URL template to visit when searching for changelists + url: https://github.com/knative/serving/compare/... + +# Test groups + +test_groups: +- name: ci-knative-serving-continuous + gcs_prefix: knative-prow/logs/ci-knative-serving-continuous +- name: ci-knative-serving-release + gcs_prefix: knative-prow/logs/ci-knative-serving-release +- name: ci-knative-serving-playground + gcs_prefix: knative-prow/logs/ci-knative-serving-playground +- name: pull-knative-serving-test-coverage + gcs_prefix: knative-prow/logs/ci-knative-serving-go-coverage + short_text_metric: coverage +- name: ci-knative-serving-latency + gcs_prefix: knative-prow/logs/ci-knative-serving-latency + short_text_metric: latency +- name: ci-knative-serving-api-coverage + gcs_prefix: knative-prow/logs/ci-knative-serving-api-coverage + short_text_metric: api_coverage +- name: ci-knative-build-continuous + gcs_prefix: knative-prow/logs/ci-knative-build-continuous +- name: ci-knative-build-release + gcs_prefix: knative-prow/logs/ci-knative-build-release +- name: pull-knative-build-test-coverage + gcs_prefix: knative-prow/logs/ci-knative-build-go-coverage + short_text_metric: coverage +- name: ci-knative-build-latency + gcs_prefix: knative-prow/logs/ci-knative-build-latency + short_text_metric: latency +- name: ci-knative-build-templates-continuous + gcs_prefix: 
knative-prow/logs/ci-knative-build-templates-continuous +- name: pull-knative-build-pipeline-test-coverage + gcs_prefix: knative-prow/logs/ci-knative-build-pipeline-go-coverage + short_text_metric: coverage +- name: ci-knative-eventing-continuous + gcs_prefix: knative-prow/logs/ci-knative-eventing-continuous +- name: ci-knative-eventing-release + gcs_prefix: knative-prow/logs/ci-knative-eventing-release +- name: pull-knative-eventing-test-coverage + gcs_prefix: knative-prow/logs/ci-knative-eventing-go-coverage + short_text_metric: coverage +- name: ci-knative-eventing-sources-continuous + gcs_prefix: knative-prow/logs/ci-knative-eventing-sources-continuous +- name: ci-knative-eventing-sources-release + gcs_prefix: knative-prow/logs/ci-knative-eventing-sources-release +- name: pull-knative-eventing-sources-test-coverage + gcs_prefix: knative-prow/logs/ci-knative-eventing-sources-go-coverage + short_text_metric: coverage +- name: ci-knative-docs-continuous + gcs_prefix: knative-prow/logs/ci-knative-docs-continuous +- name: pull-knative-docs-test-coverage + gcs_prefix: knative-prow/logs/ci-knative-docs-go-coverage + short_text_metric: coverage +- name: ci-knative-pkg-continuous + gcs_prefix: knative-prow/logs/ci-knative-pkg-continuous +- name: pull-knative-pkg-test-coverage + gcs_prefix: knative-prow/logs/ci-knative-pkg-go-coverage + short_text_metric: coverage +- name: ci-knative-caching-continuous + gcs_prefix: knative-prow/logs/ci-knative-caching-continuous +- name: pull-knative-caching-test-coverage + gcs_prefix: knative-prow/logs/ci-knative-caching-go-coverage + short_text_metric: coverage + +# Dashboards + +dashboards: +- name: knative-serving + dashboard_tab: + - name: continuous + test_group_name: ci-knative-serving-continuous + - name: release + test_group_name: ci-knative-serving-release + - name: playground + test_group_name: ci-knative-serving-playground + - name: coverage + test_group_name: pull-knative-serving-test-coverage + base_options: 
'exclude-filter-by-regex=Overall&group-by-directory=&expand-groups=&sort-by-name=' + - name: latency + test_group_name: ci-knative-serving-latency + description: '95% latency in ms' + base_options: 'exclude-filter-by-regex=Overall&group-by-directory=&expand-groups=&sort-by-name=' + - name: api-coverage + test_group_name: ci-knative-serving-api-coverage + description: 'Conformance tests API coverage.' + base_options: 'exclude-filter-by-regex=Overall$&group-by-directory=&expand-groups=&sort-by-name=' +- name: knative-build + dashboard_tab: + - name: continuous + test_group_name: ci-knative-build-continuous + - name: release + test_group_name: ci-knative-build-release + - name: coverage + test_group_name: pull-knative-build-test-coverage + base_options: 'exclude-filter-by-regex=Overall&group-by-directory=&expand-groups=&sort-by-name=' + - name: latency + test_group_name: ci-knative-build-latency + description: '95% latency in ms' + base_options: 'exclude-filter-by-regex=Overall&group-by-directory=&expand-groups=&sort-by-name=' +- name: knative-build-templates + dashboard_tab: + - name: continuous + test_group_name: ci-knative-build-templates-continuous +- name: knative-build-pipeline + dashboard_tab: + - name: coverage + test_group_name: pull-knative-build-pipeline-test-coverage + base_options: 'exclude-filter-by-regex=Overall&group-by-directory=&expand-groups=&sort-by-name=' +- name: knative-eventing + dashboard_tab: + - name: continuous + test_group_name: ci-knative-eventing-continuous + - name: release + test_group_name: ci-knative-eventing-release + - name: coverage + test_group_name: pull-knative-eventing-test-coverage + base_options: 'exclude-filter-by-regex=Overall&group-by-directory=&expand-groups=&sort-by-name=' +- name: knative-eventing-sources + dashboard_tab: + - name: continuous + test_group_name: ci-knative-eventing-sources-continuous + - name: release + test_group_name: ci-knative-eventing-sources-release + - name: coverage + test_group_name: 
pull-knative-eventing-sources-test-coverage + base_options: 'exclude-filter-by-regex=Overall&group-by-directory=&expand-groups=&sort-by-name=' +- name: knative-docs + dashboard_tab: + - name: continuous + test_group_name: ci-knative-docs-continuous + - name: coverage + test_group_name: pull-knative-docs-test-coverage + base_options: 'exclude-filter-by-regex=Overall&group-by-directory=&expand-groups=&sort-by-name=' +- name: knative-pkg + dashboard_tab: + - name: continuous + test_group_name: ci-knative-pkg-continuous + - name: coverage + test_group_name: pull-knative-pkg-test-coverage + base_options: 'exclude-filter-by-regex=Overall&group-by-directory=&expand-groups=&sort-by-name=' +- name: knative-caching + dashboard_tab: + - name: continuous + test_group_name: ci-knative-caching-continuous + - name: coverage + test_group_name: pull-knative-caching-test-coverage + base_options: 'exclude-filter-by-regex=Overall&group-by-directory=&expand-groups=&sort-by-name=' + +# Dashboard groups + +dashboard_groups: +- name: knative + dashboard_names: + - knative-serving + - knative-build + - knative-build-templates + - knative-build-pipeline + - knative-eventing + - knative-eventing-sources + - knative-docs + - knative-pkg + - knative-caching diff --git a/vendor/github.com/knative/test-infra/dummy.go b/vendor/github.com/knative/test-infra/dummy.go new file mode 100644 index 00000000000..94e6159ba78 --- /dev/null +++ b/vendor/github.com/knative/test-infra/dummy.go @@ -0,0 +1,10 @@ +package main + +import ( + "fmt" +) + +func main() { + fmt.Println("This is a dummy go file so `go dep` can be used with knative/test-infra repo") + fmt.Println("This file can be removed once the repo contains real, useful go code in the root dir") +} diff --git a/vendor/github.com/knative/test-infra/images/README.md b/vendor/github.com/knative/test-infra/images/README.md new file mode 100644 index 00000000000..22b9b16edd0 --- /dev/null +++ b/vendor/github.com/knative/test-infra/images/README.md @@ 
-0,0 +1,3 @@ +# Prow Job Images + +This directory contains custom Docker images used by our Prow jobs. diff --git a/vendor/github.com/knative/test-infra/images/apicoverage/Dockerfile b/vendor/github.com/knative/test-infra/images/apicoverage/Dockerfile new file mode 100644 index 00000000000..897ec7d82ef --- /dev/null +++ b/vendor/github.com/knative/test-infra/images/apicoverage/Dockerfile @@ -0,0 +1,20 @@ +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +FROM golang:1.10.2 +LABEL maintainer "Srinivas Hegde " +RUN apt-get update && apt-get install -y --no-install-recommends + +COPY apicoverage /apicoverage +ENTRYPOINT ["/apicoverage"] diff --git a/vendor/github.com/knative/test-infra/images/apicoverage/Makefile b/vendor/github.com/knative/test-infra/images/apicoverage/Makefile new file mode 100644 index 00000000000..b5a87ca546c --- /dev/null +++ b/vendor/github.com/knative/test-infra/images/apicoverage/Makefile @@ -0,0 +1,23 @@ + +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +REGISTRY ?= gcr.io +PROJECT ?= knative-tests/test-infra +PUSH ?= docker push + +apicoverage-image: + CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build ../../tools/apicoverage + docker build -t "$(REGISTRY)/$(PROJECT)/apicoverage:latest" . + $(PUSH) "$(REGISTRY)/$(PROJECT)/apicoverage:latest" diff --git a/vendor/github.com/knative/test-infra/images/apicoverage/README.md b/vendor/github.com/knative/test-infra/images/apicoverage/README.md new file mode 100644 index 00000000000..b855777358b --- /dev/null +++ b/vendor/github.com/knative/test-infra/images/apicoverage/README.md @@ -0,0 +1,3 @@ +# API coverage tool Image + +This directory contains the custom docker image used for calculating the API coverage by the conformance tests. diff --git a/vendor/github.com/knative/test-infra/images/prow-tests/Dockerfile b/vendor/github.com/knative/test-infra/images/prow-tests/Dockerfile new file mode 100644 index 00000000000..7baf483481e --- /dev/null +++ b/vendor/github.com/knative/test-infra/images/prow-tests/Dockerfile @@ -0,0 +1,56 @@ +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +FROM gcr.io/k8s-testimages/kubekins-e2e:v20181001-df2f5324a-master +LABEL maintainer "Adriano Cunha " + +# Install extras on top of base image + +ENV DEBIAN_FRONTEND noninteractive +RUN apt-get update +RUN gcloud components update + +# Docker +RUN gcloud components install docker-credential-gcr +RUN docker-credential-gcr configure-docker + +# Extra tools through apt-get +RUN apt-get install -y uuid-runtime # for uuidgen +RUN apt-get install -y npm # for markdown-link-check +RUN apt-get install -y rubygems # for mdl +RUN apt-get install -y build-essential libssl-dev # for wrk +RUN apt-get install -y netbase # sets up /etc/services needed for wrk + +# Extra tools through go get +RUN go get -u github.com/google/go-containerregistry/cmd/ko +RUN go get -u github.com/golang/dep/cmd/dep +RUN go get -u github.com/google/licenseclassifier + +# Extra tools through npm +RUN npm install -g markdown-link-check + +# Extra tools through gem +RUN gem install mixlib-config -v 2.2.4 # required because ruby is 2.1 +RUN gem install mdl + +# Install wrk +RUN git clone https://github.com/wg/wrk.git wrk +RUN make -C wrk/ +RUN cp wrk/wrk /usr/local/bin + +ADD . /go/src/github.com/knative/test-infra + +# Extra custom tools +RUN cp /go/src/github.com/knative/test-infra/tools/githubhelper/githubhelper . +RUN go install github.com/knative/test-infra/tools/dep-collector diff --git a/vendor/github.com/knative/test-infra/images/prow-tests/Makefile b/vendor/github.com/knative/test-infra/images/prow-tests/Makefile new file mode 100644 index 00000000000..6e1ce3c08ca --- /dev/null +++ b/vendor/github.com/knative/test-infra/images/prow-tests/Makefile @@ -0,0 +1,34 @@ +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +IMG = gcr.io/knative-tests/test-infra/prow-tests +TAG := $(shell date +v%Y%m%d)-$(shell git describe --tags --always --dirty) + +all: build + +build: + make -C ../../tools/githubhelper + docker build -t $(IMG):$(TAG) -f Dockerfile ../.. + docker tag $(IMG):$(TAG) $(IMG):latest + +push_versioned: build + docker push $(IMG):$(TAG) + +push_latest: build + docker push $(IMG):latest + +clean: + rm -fr githubhelper dep-collector + +push: push_versioned push_latest clean diff --git a/vendor/github.com/knative/test-infra/images/prow-tests/README.md b/vendor/github.com/knative/test-infra/images/prow-tests/README.md new file mode 100644 index 00000000000..d1b904427e4 --- /dev/null +++ b/vendor/github.com/knative/test-infra/images/prow-tests/README.md @@ -0,0 +1,13 @@ +# Prow Test Job Image + +This directory contains the custom Docker image used by our Prow test jobs. + +## Building and publishing a new image + +To build and push a new image, just run `make push`. + +For testing purposes you can build an image but not push it; to do so, run `make build`. + +Note that you must have proper permission in the `knative-tests` project to push new images to the GCR. + +The `prow-tests` image is pinned on a specific `kubekins` image; update `Dockerfile` if you need to use a newer/different image. This will basically define the versions of `bazel`, `go`, `kubectl` and other build tools. 
diff --git a/vendor/github.com/knative/test-infra/test/e2e-tests.sh b/vendor/github.com/knative/test-infra/test/e2e-tests.sh new file mode 100755 index 00000000000..128733ce38d --- /dev/null +++ b/vendor/github.com/knative/test-infra/test/e2e-tests.sh @@ -0,0 +1,50 @@ +#!/bin/bash + +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This script runs the end-to-end tests. + +# If you already have a Knative cluster setup and kubectl pointing +# to it, call this script with the --run-tests arguments and it will use +# the cluster and run the tests. + +# Calling this script without arguments will create a new cluster in +# project $PROJECT_ID, run the tests and delete the cluster. + +source $(dirname $0)/../scripts/e2e-tests.sh + +function parse_flags() { + if [[ "$1" == "--smoke-test-custom-flag" ]]; then + echo "--smoke-test-custom-flag passed" + exit 0 + fi + return 0 +} + +# Script entry point. + +initialize $@ + +if (( USING_EXISTING_CLUSTER )); then + echo "ERROR: this test isn't intended to run against an existing cluster" + fail_test +fi + +start_latest_knative_serving || fail_test "Knative Serving is not up" + +# This is actually a unit test, but it does exercise the necessary helper functions. 
+go_test_e2e -run TestE2ESucceeds ./test || fail_test + +success diff --git a/vendor/github.com/knative/test-infra/test/presubmit-tests.sh b/vendor/github.com/knative/test-infra/test/presubmit-tests.sh new file mode 100755 index 00000000000..f3f1c71080e --- /dev/null +++ b/vendor/github.com/knative/test-infra/test/presubmit-tests.sh @@ -0,0 +1,49 @@ +#!/bin/bash + +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This script runs the presubmit tests; it is started by prow for each PR. +# For convenience, it can also be executed manually. +# Running the script without parameters, or with the --all-tests +# flag, causes all tests to be executed, in the right order. +# Use the flags --build-tests, --unit-tests and --integration-tests +# to run a specific set of tests. + +source $(dirname $0)/../scripts/presubmit-tests.sh + +function build_tests() { + header "Running build tests" + local failed=0 + make -C ci/prow test || failed=1 + make -C ci/testgrid test || failed=1 + for script in scripts/*.sh; do + echo "Checking integrity of ${script}" + bash -c "source ${script}" || failed=1 + done + return ${failed} +} + +function unit_tests() { + header "Running unit tests" + local failed=0 + for test in ./test/unit/*-tests.sh; do + ${test} || failed=1 + done + return ${failed} +} + +# We use the default integration test runner. 
+ +main $@ diff --git a/vendor/github.com/knative/test-infra/test/unit/e2e-custom-flag-tests.sh b/vendor/github.com/knative/test-infra/test/unit/e2e-custom-flag-tests.sh new file mode 100755 index 00000000000..b5528861752 --- /dev/null +++ b/vendor/github.com/knative/test-infra/test/unit/e2e-custom-flag-tests.sh @@ -0,0 +1,38 @@ +#!/bin/bash + +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This script runs the end-to-end tests. + +# If you already have a Knative cluster setup and kubectl pointing +# to it, call this script with the --run-tests arguments and it will use +# the cluster and run the tests. + +# Calling this script without arguments will create a new cluster in +# project $PROJECT_ID, run the tests and delete the cluster. + +source $(dirname $0)/../../scripts/e2e-tests.sh + +function parse_flags() { + if [[ "$1" == "--smoke-test-custom-flag" ]]; then + echo "OK: --smoke-test-custom-flag passed" + exit 0 + fi + fail_test "Unexpected flag $1 passed" +} + +# Script entry point. 
+ +initialize --smoke-test-custom-flag diff --git a/vendor/github.com/knative/test-infra/test/unit/library-tests.sh b/vendor/github.com/knative/test-infra/test/unit/library-tests.sh new file mode 100755 index 00000000000..13bf1cee725 --- /dev/null +++ b/vendor/github.com/knative/test-infra/test/unit/library-tests.sh @@ -0,0 +1,50 @@ +#!/bin/bash + +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Fake we're in a Prow job, if running locally. +[[ -z "${PROW_JOB_ID:-}" ]] && PROW_JOB_ID=123 + +source $(dirname $0)/../../scripts/library.sh + +set -e + +function test_report() { + local REPORT="$(mktemp)" + report_go_test -run $1 ./test > ${REPORT} || true + cat ${REPORT} + grep "$2" ${REPORT} > /dev/null + grep "Done parsing 1 tests" ${REPORT} > /dev/null +} + +# Cleanup bazel stuff to avoid confusing Prow +function cleanup_bazel() { + bazel clean +} + +trap cleanup_bazel EXIT + +header "Testing report_go_test" + +subheader "Test pass" +test_report TestSucceeds "^- TestSucceeds :PASS:" + +subheader "Test fails with fatal" +test_report TestFailsWithFatal "^- TestFailsWithFatal :FAIL:" + +subheader "Test fails with SIGQUIT" +test_report TestFailsWithSigQuit "^- TestFailsWithSigQuit :FAIL:" + +header "All tests passed" diff --git a/vendor/github.com/knative/test-infra/test/unit/presubmit-full-custom-integration-tests.sh b/vendor/github.com/knative/test-infra/test/unit/presubmit-full-custom-integration-tests.sh new file mode 100755 
index 00000000000..d22b66e32dc --- /dev/null +++ b/vendor/github.com/knative/test-infra/test/unit/presubmit-full-custom-integration-tests.sh @@ -0,0 +1,28 @@ +#!/bin/bash + +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +source $(dirname $0)/presubmit-integration-tests-common.sh + +function check_results() { + (( PRE_INTEGRATION_TESTS )) || failed "Pre integration tests did not run" + (( CUSTOM_INTEGRATION_TESTS )) || failed "Custom integration tests did not run" + (( POST_INTEGRATION_TESTS )) || failed "Post integration tests did not run" + echo "Test passed" +} + +echo "Testing all custom test integration functions" + +main $@ diff --git a/vendor/github.com/knative/test-infra/test/unit/presubmit-integration-tests-common.sh b/vendor/github.com/knative/test-infra/test/unit/presubmit-integration-tests-common.sh new file mode 100755 index 00000000000..78c0f4d0646 --- /dev/null +++ b/vendor/github.com/knative/test-infra/test/unit/presubmit-integration-tests-common.sh @@ -0,0 +1,48 @@ +#!/bin/bash + +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +source $(dirname $0)/../../scripts/presubmit-tests.sh + +function failed() { + echo $1 + exit 1 +} + +function pre_integration_tests() { + PRE_INTEGRATION_TESTS=1 +} + +function integration_tests() { + CUSTOM_INTEGRATION_TESTS=1 +} + +function post_integration_tests() { + POST_INTEGRATION_TESTS=1 +} + +function build_tests() { + return 0 +} + +function unit_tests() { + return 0 +} + +PRE_INTEGRATION_TESTS=0 +CUSTOM_INTEGRATION_TESTS=0 +POST_INTEGRATION_TESTS=0 + +trap check_results EXIT diff --git a/vendor/github.com/knative/test-infra/test/unit/presubmit-partial-custom-integration-tests.sh b/vendor/github.com/knative/test-infra/test/unit/presubmit-partial-custom-integration-tests.sh new file mode 100755 index 00000000000..e0fb4ef24fa --- /dev/null +++ b/vendor/github.com/knative/test-infra/test/unit/presubmit-partial-custom-integration-tests.sh @@ -0,0 +1,33 @@ +#!/bin/bash + +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Test that pre/post integration tests don't run if unset. 
+ +source $(dirname $0)/presubmit-integration-tests-common.sh + +function check_results() { + (( ! PRE_INTEGRATION_TESTS )) || failed "Pre integration tests did run" + (( CUSTOM_INTEGRATION_TESTS )) || failed "Custom integration tests did not run" + (( ! POST_INTEGRATION_TESTS )) || failed "Post integration tests did run" + echo "Test passed" +} + +echo "Testing custom test integration function" + +unset -f pre_integration_tests +unset -f post_integration_tests + +main $@ diff --git a/vendor/github.com/knative/test-infra/test/unit/release-tests.sh b/vendor/github.com/knative/test-infra/test/unit/release-tests.sh new file mode 100755 index 00000000000..4b226df1bb6 --- /dev/null +++ b/vendor/github.com/knative/test-infra/test/unit/release-tests.sh @@ -0,0 +1,73 @@ +#!/bin/bash + +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +source $(dirname $0)/../../scripts/release.sh + +set -e + +# Call a function and verify its return value and output. +# Parameters: $1 - expected return code. +# $2 - expected output ("" if no output is expected) +# $3 ..$n - function to call and its parameters. +function test_function() { + local expected_retcode=$1 + local expected_string=$2 + local output="$(mktemp)" + local output_code="$(mktemp)" + shift 2 + echo -n "$(trap '{ echo $? 
> ${output_code}; }' EXIT ; $@)" > ${output} + local retcode=$(cat ${output_code}) + if [[ ${retcode} -ne ${expected_retcode} ]]; then + cat ${output} + echo "Return code ${retcode} doesn't match expected return code ${expected_retcode}" + return 1 + fi + if [[ -n "${expected_string}" ]]; then + local found=1 + grep "${expected_string}" ${output} > /dev/null || found=0 + if (( ! found )); then + cat ${output} + echo "String '${expected_string}' not found" + return 1 + fi + else + if [[ -s ${output} ]]; then + ls ${output} + cat ${output} + echo "Unexpected output" + return 1 + fi + fi + echo "'$@' returns code ${expected_retcode} and displays '${expected_string}'" +} + +header "Testing initialization" + +test_function 1 "error: missing version" initialize --version +test_function 1 "error: version format" initialize --version a +test_function 1 "error: version format" initialize --version 0.0 +test_function 0 "" initialize --version 1.0.0 + +test_function 1 "error: missing branch" initialize --branch +test_function 1 "error: branch name must be" initialize --branch a +test_function 1 "error: branch name must be" initialize --branch 0.0 +test_function 0 "" initialize --branch release-0.0 + +test_function 1 "error: missing release notes" initialize --release-notes +test_function 1 "error: file a doesn't" initialize --release-notes a +test_function 0 "" initialize --release-notes $(mktemp) + +header "All tests passed" diff --git a/vendor/github.com/knative/test-infra/tools/README.md b/vendor/github.com/knative/test-infra/tools/README.md new file mode 100644 index 00000000000..d4cf2a272f2 --- /dev/null +++ b/vendor/github.com/knative/test-infra/tools/README.md @@ -0,0 +1,3 @@ +# Test Infrastructure tools + +This directory contains tools used by our Prow jobs. 
diff --git a/vendor/github.com/knative/test-infra/tools/apicoverage/README.md b/vendor/github.com/knative/test-infra/tools/apicoverage/README.md new file mode 100644 index 00000000000..01ddf855151 --- /dev/null +++ b/vendor/github.com/knative/test-infra/tools/apicoverage/README.md @@ -0,0 +1,14 @@ +# API Coverage Tool +This tool is designed to show the field level coverage exercised by the conformance tests. + +## Read from GCS +This tool reads the logs from the latest continous build of knative/serving. The logs have the information of which CRD objects are being created and which fields are being set for the testing. +It uses the service account passed in or by default will use the GOOGLE_APPLICATION_CREDENTIALS variable to get the logs. + +## Creating Output +This tool creates an output xml in the prow artifacts directory. The prow artifacts directory is passed in or by default will use `./artifacts` directory. + +This output xml will be read by testgrid and displayed on the [dashboard](https://testgrid.knative.dev/knative-serving#api-coverage). + +## Prow Job +There is a daily prow job that triggers this tool that is run at 01:05 AM PST. This tool will then generate the output xml which is then displayed in the testgrid dashboard. diff --git a/vendor/github.com/knative/test-infra/tools/apicoverage/apicoverage.go b/vendor/github.com/knative/test-infra/tools/apicoverage/apicoverage.go new file mode 100644 index 00000000000..6e007a7270b --- /dev/null +++ b/vendor/github.com/knative/test-infra/tools/apicoverage/apicoverage.go @@ -0,0 +1,241 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// apicoverage.go parses the log file and outputs the api coverage numbers in a +// testgrid expected output xml file + +package main + +import ( + "context" + "encoding/json" + "flag" + "fmt" + "log" + "os" + "reflect" + "strings" + + "github.com/knative/serving/pkg/apis/serving/v1alpha1" + "github.com/knative/test-infra/tools/gcs" + "github.com/knative/test-infra/tools/testgrid" +) + +const ( + logDir = "logs/ci-knative-serving-continuous/" + buildFile = "build-log.txt" + apiCoverage = "api_coverage" + overallRoute = "OverallRoute" + overallConfig = "OverallConfiguration" + overallService = "OverallService" +) + +// ResourceObjects defines the resource objects in knative-serving +type ResourceObjects struct { + Route *v1alpha1.Route + Configuration *v1alpha1.Configuration + Service *v1alpha1.Service +} + +// OverallAPICoverage defines the overall api coverage for knative serving +type OverallAPICoverage struct { + RouteAPICovered map[string]int + RouteAPINotCovered map[string]int + ConfigurationAPICovered map[string]int + ConfigurationAPINotCovered map[string]int + ServiceAPICovered map[string]int + ServiceAPINotCovered map[string]int +} + +type apiObjectName string + +const ( + apiObjectRoute apiObjectName = "route" + apiObjectConfiguration = "configuration" + apiObjectService = "service" +) + +// check if the object value is nil or empty. 
+// Uses https://golang.org/pkg/reflect/#Kind to get the variable type +func isNil(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + } + return false +} + +func isStruct(v reflect.Value) bool { + return v.Kind() == reflect.Struct +} + +// Parse the struct and returns a map of +func parseStruct(v reflect.Value) map[string]reflect.Value { + f := make(map[string]reflect.Value) + + for i := 0; i < v.NumField(); i++ { + // Include only public vars. https://golang.org/pkg/reflect/#StructField. + if len(v.Type().Field(i).PkgPath) == 0 { + f[v.Type().Field(i).Name] = v.Field(i) + } + } + + return f +} + +func incrementCoverageValues(name string, covered map[string]int) { + if i, ok := covered[name]; ok { + covered[name] = i + 1 + } else { + covered[name] = 1 + } +} + +func handleCovered(name string, coverage *OverallAPICoverage) { + if strings.HasPrefix(name, "route") { + incrementCoverageValues(name, coverage.RouteAPICovered) + } else if strings.HasPrefix(name, "configuration") { + incrementCoverageValues(name, coverage.ConfigurationAPICovered) + } else if strings.HasPrefix(name, "service") { + incrementCoverageValues(name, coverage.ServiceAPICovered) + } +} + +func handleNotCovered(name string, coverage *OverallAPICoverage) { + if strings.HasPrefix(name, "route") { + coverage.RouteAPINotCovered[name] = 0 + } else if strings.HasPrefix(name, "configuration") { + coverage.ConfigurationAPINotCovered[name] = 0 + } else if strings.HasPrefix(name, "service") { + coverage.ServiceAPINotCovered[name] = 0 + } +} + +func getCoverage(value reflect.Value, name string, coverage *OverallAPICoverage) { + // Parse all the fields in the struct + for key, v := range parseStruct(value) { + name := name + "." 
+ key + if isStruct(v) { + getCoverage(v, name, coverage) + } else { + // check if it is empty/nil + if isNil(v) { + handleNotCovered(name, coverage) + } else { + handleCovered(name, coverage) + } + } + } +} + +func calculateCoverage(covLogs []string, coverage *OverallAPICoverage) { + if len(covLogs) == 0 { + return + } + + for _, f := range covLogs { + var obj ResourceObjects + if err := json.Unmarshal([]byte(f), &obj); err != nil { + log.Fatalf("Cannot read resource object: %v", err) + } else { + if obj.Route != nil { + getCoverage(reflect.ValueOf(obj.Route).Elem(), "route", coverage) + } else if obj.Configuration != nil { + getCoverage(reflect.ValueOf(obj.Configuration).Elem(), "configuration", coverage) + } else if obj.Service != nil { + getCoverage(reflect.ValueOf(obj.Service).Elem(), "service", coverage) + } + } + } +} + +func initCoverage() *OverallAPICoverage { + coverage := OverallAPICoverage{} + coverage.RouteAPICovered = make(map[string]int) + coverage.RouteAPINotCovered = make(map[string]int) + coverage.ConfigurationAPICovered = make(map[string]int) + coverage.ConfigurationAPINotCovered = make(map[string]int) + coverage.ServiceAPICovered = make(map[string]int) + coverage.ServiceAPINotCovered = make(map[string]int) + + return &coverage +} + +func getRelevantLogs(fields []string) *string { + // I0727 16:23:30.055] 2018-10-12T18:18:06.835-0700 info TestRouteCreation test/configuration.go:34 resource {: }"} + if len(fields) == 8 && fields[3] == "info" && fields[6] == "resource" { + s := strings.Join(fields[7:], " ") + return &s + } + return nil +} + +func createCases(tcName string, covered map[string]int, notCovered map[string]int) []testgrid.TestCase { + var tc []testgrid.TestCase + + var percentCovered = float32(100 * len(covered) / (len(covered) + len(notCovered))) + tp := []testgrid.TestProperty{testgrid.TestProperty{Name: apiCoverage, Value: percentCovered}} + tc = append(tc, testgrid.TestCase{Name: tcName, Properties: testgrid.TestProperties{Property: 
tp}, Fail: false}) + + for key, value := range covered { + tp := []testgrid.TestProperty{testgrid.TestProperty{Name: apiCoverage, Value: float32(value)}} + tc = append(tc, testgrid.TestCase{Name: tcName + "/" + key, Properties: testgrid.TestProperties{Property: tp}, Fail: false}) + } + + for key, value := range notCovered { + tp := []testgrid.TestProperty{testgrid.TestProperty{Name: apiCoverage, Value: float32(value)}} + tc = append(tc, testgrid.TestCase{Name: tcName + "/" + key, Properties: testgrid.TestProperties{Property: tp}, Fail: true}) + } + return tc +} + +func createTestgridXML(coverage *OverallAPICoverage, artifactsDir string) { + tc := createCases(overallRoute, coverage.RouteAPICovered, coverage.RouteAPINotCovered) + tc = append(tc, createCases(overallConfig, coverage.ConfigurationAPICovered, coverage.ConfigurationAPINotCovered)...) + tc = append(tc, createCases(overallService, coverage.ServiceAPICovered, coverage.ServiceAPINotCovered)...) + ts := testgrid.TestSuite{TestCases: tc} + + if err := testgrid.CreateXMLOutput(ts, artifactsDir); err != nil { + log.Fatalf("Cannot create the xml output file: %v", err) + } +} + +func main() { + + artifactsDir := flag.String("artifacts-dir", "./artifacts", "Directory to store the generated XML file") + serviceAccount := flag.String("service-account", os.Getenv("GOOGLE_APPLICATION_CREDENTIALS"), "JSON key file for service account to use") + flag.Parse() + + // Read the latest-build.txt file to get the latest build number + ctx := context.Background() + num, err := gcs.GetLatestBuildNumber(ctx, logDir, *serviceAccount) + if err != nil { + log.Fatalf("Cannot get latest build number: %v", err) + } + + // Calculate coverage + coverage := initCoverage() + calculateCoverage( + gcs.ParseLog(ctx, fmt.Sprintf("%s/%d/%s", logDir, num, buildFile), getRelevantLogs), + coverage) + + // Write the testgrid xml to artifacts + createTestgridXML(coverage, *artifactsDir) +} diff --git 
a/vendor/github.com/knative/test-infra/tools/dep-collector/README.md b/vendor/github.com/knative/test-infra/tools/dep-collector/README.md new file mode 100644 index 00000000000..9acf6cef809 --- /dev/null +++ b/vendor/github.com/knative/test-infra/tools/dep-collector/README.md @@ -0,0 +1,88 @@ +# dep-collector + +`dep-collector` is a tool for gathering up a collection of licenses for Go +dependencies that have been pulled into the idiomatic `vendor/` directory. +The resulting file from running `dep-collector` is intended for inclusion +in container images to respect the licenses of the included software. + +### Basic Usage + +You can run `dep-collector` on one or more Go import paths as entrypoints, +and it will: +1. Walk the transitive dependencies to identify vendored software packages, +1. Search for licenses for each vendored dependency, +1. Dump a file containing the licenses for each vendored import. + +For example (single import path): +```shell +$ dep-collector . +=========================================================== +Import: github.com/mattmoor/dep-collector/vendor/github.com/google/licenseclassifier + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ +... + +``` + +For example (multiple import paths): + +```shell +$ dep-collector ./cmd/controller ./cmd/sleeper + +=========================================================== +Import: github.com/mattmoor/warm-image/vendor/cloud.google.com/go + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ +``` + +### CSV Usage + +You can also run `dep-collector` in a mode that produces CSV output, +including basic classification of the license. + +> In order to run dep-collector in this mode, you must first run: +> go get github.com/google/licenseclassifier + +For example: + +```shell +$ dep-collector -csv . 
+github.com/google/licenseclassifier,Static,,https://github.com/mattmoor/dep-collector/blob/master/vendor/github.com/google/licenseclassifier/LICENSE,Apache-2.0 +github.com/google/licenseclassifier/stringclassifier,Static,,https://github.com/mattmoor/dep-collector/blob/master/vendor/github.com/google/licenseclassifier/stringclassifier/LICENSE,Apache-2.0 +github.com/sergi/go-diff,Static,,https://github.com/mattmoor/dep-collector/blob/master/vendor/github.com/sergi/go-diff/LICENSE,MIT + +``` + +The columns here are: +* Import Path, +* How the dependency is linked in (always reports "static"), +* A column for whether any modifications have been made (always empty), +* The URL by which to access the license file (assumes `master`), +* A classification of what license this is ([using this](https://github.com/google/licenseclassifier)). + + +### Check mode + +`dep-collector` also includes a mode that will check for "forbidden" licenses. + +> In order to run dep-collector in this mode, you must first run: +> go get github.com/google/licenseclassifier + +For example (failing): +```shell +$ dep-collector -check ./foo/bar/baz +2018/07/20 22:01:29 Error checking license collection: Errors validating licenses: +Found matching forbidden license in "foo.io/bar/vendor/github.com/BurntSushi/toml": WTFPL +``` + +For example (passing): + +```shell +$ dep-collector -check . +2018/07/20 22:29:09 No errors found. +``` diff --git a/vendor/github.com/knative/test-infra/tools/dep-collector/imports.go b/vendor/github.com/knative/test-infra/tools/dep-collector/imports.go new file mode 100644 index 00000000000..924ce410228 --- /dev/null +++ b/vendor/github.com/knative/test-infra/tools/dep-collector/imports.go @@ -0,0 +1,94 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "fmt" + gb "go/build" + "path/filepath" + "sort" + "strings" +) + +func CollectTransitiveImports(binaries []string) ([]string, error) { + // Perform a simple DFS to collect the binaries' transitive dependencies. + visited := make(map[string]struct{}) + for _, importpath := range binaries { + if gb.IsLocalImport(importpath) { + ip, err := qualifyLocalImport(importpath) + if err != nil { + return nil, err + } + importpath = ip + } + + pkg, err := gb.Import(importpath, WorkingDir, gb.ImportComment) + if err != nil { + return nil, err + } + if err := visit(pkg, visited); err != nil { + return nil, err + } + } + + // Sort the dependencies deterministically. 
+ var list sort.StringSlice + for ip := range visited { + if !strings.Contains(ip, "/vendor/") { + // Skip files outside of vendor + continue + } + list = append(list, ip) + } + list.Sort() + + return list, nil +} + +func qualifyLocalImport(ip string) (string, error) { + gopathsrc := filepath.Join(gb.Default.GOPATH, "src") + if !strings.HasPrefix(WorkingDir, gopathsrc) { + return "", fmt.Errorf("working directory must be on ${GOPATH}/src = ", gopathsrc) + } + return filepath.Join(strings.TrimPrefix(WorkingDir, gopathsrc+string(filepath.Separator)), ip), nil +} + +func visit(pkg *gb.Package, visited map[string]struct{}) error { + if _, ok := visited[pkg.ImportPath]; ok { + return nil + } + visited[pkg.ImportPath] = struct{}{} + + for _, ip := range pkg.Imports { + if ip == "C" { + // skip cgo + continue + } + subpkg, err := gb.Import(ip, WorkingDir, gb.ImportComment) + if err != nil { + return fmt.Errorf("%v\n -> %v", pkg.ImportPath, err) + } + if !strings.HasPrefix(subpkg.Dir, WorkingDir) { + // Skip import paths outside of our workspace (std library) + continue + } + if err := visit(subpkg, visited); err != nil { + return fmt.Errorf("%v (%v)\n -> %v", pkg.ImportPath, pkg.Dir, err) + } + } + return nil +} diff --git a/vendor/github.com/knative/test-infra/tools/dep-collector/licenses.go b/vendor/github.com/knative/test-infra/tools/dep-collector/licenses.go new file mode 100644 index 00000000000..cb1df9ab748 --- /dev/null +++ b/vendor/github.com/knative/test-infra/tools/dep-collector/licenses.go @@ -0,0 +1,203 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "fmt" + gb "go/build" + "io/ioutil" + "os" + "path/filepath" + "sort" + "strings" + + "github.com/google/licenseclassifier" +) + +var LicenseNames = []string{ + "LICENCE", + "LICENSE", + "LICENSE.code", + "LICENSE.md", + "LICENSE.txt", + "COPYING", + "copyright", +} + +const MatchThreshold = 0.9 + +type LicenseFile struct { + EnclosingImportPath string + LicensePath string +} + +func (lf *LicenseFile) Body() (string, error) { + body, err := ioutil.ReadFile(lf.LicensePath) + if err != nil { + return "", err + } + return string(body), nil +} + +func (lt *LicenseFile) Classify(classifier *licenseclassifier.License) (string, error) { + body, err := lt.Body() + if err != nil { + return "", err + } + m := classifier.NearestMatch(body) + if m == nil { + return "", fmt.Errorf("unable to classify license: %v", lt.EnclosingImportPath) + } + return m.Name, nil +} + +func (lt *LicenseFile) Check(classifier *licenseclassifier.License) error { + body, err := lt.Body() + if err != nil { + return err + } + ms := classifier.MultipleMatch(body, false) + for _, m := range ms { + return fmt.Errorf("Found matching forbidden license in %q: %v", lt.EnclosingImportPath, m.Name) + } + return nil +} + +func (lt *LicenseFile) Entry() (string, error) { + body, err := lt.Body() + if err != nil { + return "", err + } + return fmt.Sprintf(` +=========================================================== +Import: %s + +%s +`, lt.EnclosingImportPath, body), nil +} + +func (lt *LicenseFile) CSVRow(classifier *licenseclassifier.License) (string, error) { + classification, err 
:= lt.Classify(classifier) + if err != nil { + return "", err + } + parts := strings.Split(lt.EnclosingImportPath, "/vendor/") + if len(parts) != 2 { + return "", fmt.Errorf("wrong number of parts splitting import path on %q : %q", "/vendor/", lt.EnclosingImportPath) + } + return strings.Join([]string{ + parts[1], + "Static", + "", // TODO(mattmoor): Modifications? + "https://" + parts[0] + "/blob/master/vendor/" + parts[1] + "/" + filepath.Base(lt.LicensePath), + classification, + }, ","), nil +} + +func findLicense(ip string) (*LicenseFile, error) { + pkg, err := gb.Import(ip, WorkingDir, gb.ImportComment) + if err != nil { + return nil, err + } + dir := pkg.Dir + + for { + // When we reach the root of our workspace, stop searching. + if dir == WorkingDir { + return nil, fmt.Errorf("unable to find license for %q", pkg.ImportPath) + } + + for _, name := range LicenseNames { + p := filepath.Join(dir, name) + if _, err := os.Stat(p); err != nil { + continue + } + + return &LicenseFile{ + EnclosingImportPath: ip, + LicensePath: p, + }, nil + } + + // Walk to the parent directory / import path + dir = filepath.Dir(dir) + ip = filepath.Dir(ip) + } +} + +type LicenseCollection []*LicenseFile + +func (lc LicenseCollection) Entries() (string, error) { + sections := make([]string, 0, len(lc)) + for _, key := range lc { + entry, err := key.Entry() + if err != nil { + return "", err + } + sections = append(sections, entry) + } + return strings.Join(sections, "\n"), nil +} + +func (lc LicenseCollection) CSV(classifier *licenseclassifier.License) (string, error) { + sections := make([]string, 0, len(lc)) + for _, entry := range lc { + row, err := entry.CSVRow(classifier) + if err != nil { + return "", err + } + sections = append(sections, row) + } + return strings.Join(sections, "\n"), nil +} + +func (lc LicenseCollection) Check(classifier *licenseclassifier.License) error { + errors := []string{} + for _, entry := range lc { + if err := entry.Check(classifier); err != nil { + 
errors = append(errors, err.Error()) + } + } + if len(errors) == 0 { + return nil + } + return fmt.Errorf("Errors validating licenses:\n%v", strings.Join(errors, "\n")) +} + +func CollectLicenses(imports []string) (LicenseCollection, error) { + // for each of the import paths, search for a license file. + licenseFiles := make(map[string]*LicenseFile) + for _, ip := range imports { + lf, err := findLicense(ip) + if err != nil { + return nil, err + } + licenseFiles[lf.EnclosingImportPath] = lf + } + + order := sort.StringSlice{} + for key := range licenseFiles { + order = append(order, key) + } + order.Sort() + + licenseTypes := LicenseCollection{} + for _, key := range order { + licenseTypes = append(licenseTypes, licenseFiles[key]) + } + return licenseTypes, nil +} diff --git a/vendor/github.com/knative/test-infra/tools/dep-collector/main.go b/vendor/github.com/knative/test-infra/tools/dep-collector/main.go new file mode 100644 index 00000000000..4532942751d --- /dev/null +++ b/vendor/github.com/knative/test-infra/tools/dep-collector/main.go @@ -0,0 +1,81 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "flag" + "log" + "os" + + "github.com/google/licenseclassifier" +) + +var WorkingDir, _ = os.Getwd() + +var ( + csv = flag.Bool("csv", false, "Whether to print in CSV format (with slow classification).") + check = flag.Bool("check", false, "Whether to just check license files for forbidden licenses.") +) + +func main() { + flag.Parse() + if flag.NArg() == 0 { + log.Fatalf("Expected a list of import paths, got: %v", flag.Args()) + } + + // Perform a simple DFS to collect the binaries' transitive dependencies. + transitiveImports, err := CollectTransitiveImports(flag.Args()) + if err != nil { + log.Fatalf("Error collecting transitive dependencies: %v", err) + } + + // Gather all of the license data from the imports. + collection, err := CollectLicenses(transitiveImports) + if err != nil { + log.Fatalf("Error identifying licenses for transitive dependencies: %v", err) + } + + if *check { + classifier, err := licenseclassifier.NewWithForbiddenLicenses(MatchThreshold) + if err != nil { + log.Fatalf("Error creating license classifier: %v", err) + } + if err := collection.Check(classifier); err != nil { + log.Fatalf("Error checking license collection: %v", err) + } + log.Printf("No errors found.") + return + } + + if *csv { + classifier, err := licenseclassifier.New(MatchThreshold) + if err != nil { + log.Fatalf("Error creating license classifier: %v", err) + } + output, err := collection.CSV(classifier) + if err != nil { + log.Fatalf("Error generating CSV: %v", err) + } + os.Stdout.Write([]byte(output)) + } else { + entries, err := collection.Entries() + if err != nil { + log.Fatalf("Error generating entries: %v", err) + } + os.Stdout.Write([]byte(entries)) + } +} diff --git a/vendor/github.com/knative/test-infra/tools/gcs/gcs.go b/vendor/github.com/knative/test-infra/tools/gcs/gcs.go new file mode 100644 index 00000000000..a41fbbb21a0 --- /dev/null +++ b/vendor/github.com/knative/test-infra/tools/gcs/gcs.go @@ -0,0 +1,112 @@ +/* 
+Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// gcs.go defines functions to use GCS + +package gcs + +import ( + "bufio" + "context" + "fmt" + "io/ioutil" + "log" + "strconv" + "strings" + + "cloud.google.com/go/storage" + "google.golang.org/api/option" +) + +const ( + bucketName = "knative-prow" + latest = "latest-build.txt" +) + +var client *storage.Client + +func createStorageClient(ctx context.Context, sa string) error { + var err error + client, err = storage.NewClient(ctx, option.WithCredentialsFile(sa)) + return err +} + +func createStorageObject(filename string) *storage.ObjectHandle { + return client.Bucket(bucketName).Object(filename) +} + +// GetLatestBuildNumber gets the latest build number for the specified log directory +func GetLatestBuildNumber(ctx context.Context, logDir string, sa string) (int, error) { + contents, err := ReadGcsFile(ctx, logDir+latest, sa) + if err != nil { + return 0, err + } + latestBuild, err := strconv.Atoi(string(contents)) + if err != nil { + return 0, err + } + + return latestBuild, nil +} + +//ReadGcsFile reads the specified file using the provided service account +func ReadGcsFile(ctx context.Context, filename string, sa string) ([]byte, error) { + // Create a new GCS client + if err := createStorageClient(ctx, sa); err != nil { + log.Fatalf("Failed to create GCS client: %v", err) + } + o := createStorageObject(filename) + if _, err := o.Attrs(ctx); err != nil { + return []byte(fmt.Sprintf("Cannot 
get attributes of '%s'", filename)), err + } + f, err := o.NewReader(ctx) + if err != nil { + return []byte(fmt.Sprintf("Cannot open '%s'", filename)), err + } + defer f.Close() + contents, err := ioutil.ReadAll(f) + if err != nil { + return []byte(fmt.Sprintf("Cannot read '%s'", filename)), err + } + return contents, nil +} + +// ParseLog parses the log and returns the lines where the checkLog func does not return an empty slice. +// checkLog function should take in the log statement and return a part from that statement that should be in the log output. +func ParseLog(ctx context.Context, filename string, checkLog func(s []string) *string) []string { + var logs []string + + log.Printf("Parsing '%s'", filename) + o := createStorageObject(filename) + if _, err := o.Attrs(ctx); err != nil { + log.Printf("Cannot get attributes of '%s', assuming not ready yet: %v", filename, err) + return nil + } + f, err := o.NewReader(ctx) + if err != nil { + log.Fatalf("Error opening '%s': %v", filename, err) + } + defer f.Close() + + scanner := bufio.NewScanner(f) + + for scanner.Scan() { + if s := checkLog(strings.Fields(scanner.Text())); s != nil { + logs = append(logs, *s) + } + } + return logs +} diff --git a/vendor/github.com/knative/test-infra/tools/githubhelper/Makefile b/vendor/github.com/knative/test-infra/tools/githubhelper/Makefile new file mode 100644 index 00000000000..c8fef33a770 --- /dev/null +++ b/vendor/github.com/knative/test-infra/tools/githubhelper/Makefile @@ -0,0 +1,17 @@ +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +all: + go get -u github.com/google/go-github/github + CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build . diff --git a/vendor/github.com/knative/test-infra/tools/githubhelper/README.md b/vendor/github.com/knative/test-infra/tools/githubhelper/README.md new file mode 100644 index 00000000000..5975a23521b --- /dev/null +++ b/vendor/github.com/knative/test-infra/tools/githubhelper/README.md @@ -0,0 +1,10 @@ +# GitHub Helper Tool + +This tool is designed to interact with GitHub, providing useful data for a Prow job. Actions performed and the output are governed by the flags used. + +Currently the tool makes unauthenticated requests to GitHub API. + +## Flags + +* `-list-changed-files` will list the files that are touched by the current PR in a Prow job. +* `-verbose` will dump extra info on output when executing the comments; it is intended for debugging. diff --git a/vendor/github.com/knative/test-infra/tools/githubhelper/githubhelper.go b/vendor/github.com/knative/test-infra/tools/githubhelper/githubhelper.go new file mode 100644 index 00000000000..d45fad475cc --- /dev/null +++ b/vendor/github.com/knative/test-infra/tools/githubhelper/githubhelper.go @@ -0,0 +1,85 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// githubhelper.go interacts with GitHub, providing useful data for a Prow job. 
+ +package main + +import ( + "context" + "flag" + "fmt" + "log" + "os" + "strconv" + + "github.com/google/go-github/github" +) + +var ( + // Info about the current PR + repoOwner = os.Getenv("REPO_OWNER") + repoName = os.Getenv("REPO_NAME") + pullNumber = atoi(os.Getenv("PULL_NUMBER"), "pull number") + + // Shared useful variables + ctx = context.Background() + onePageList = &github.ListOptions{Page: 1} + verbose = false + anonymousGitHubClient *github.Client +) + +// atoi is a convenience function to convert a string to integer, failing in case of error. +func atoi(str, valueName string) int { + value, err := strconv.Atoi(str) + if err != nil { + log.Fatalf("Unexpected non number '%s' for %s: %v", str, valueName, err) + } + return value +} + +// infof if a convenience wrapper around log.Infof, and does nothing unless --verbose is passed. +func infof(template string, args ...interface{}) { + if verbose { + log.Printf(template, args...) + } +} + +// listChangedFiles simply lists the files changed by the current PR. 
+func listChangedFiles() { + infof("Listing changed files for PR %d in repository %s/%s", pullNumber, repoOwner, repoName) + files, _, err := anonymousGitHubClient.PullRequests.ListFiles(ctx, repoOwner, repoName, pullNumber, onePageList) + if err != nil { + log.Fatalf("Error listing files: %v", err) + } + for _, file := range files { + fmt.Println(*file.Filename) + } +} + +func main() { + listChangedFilesFlag := flag.Bool("list-changed-files", false, "List the files changed by the current pull request") + verboseFlag := flag.Bool("verbose", false, "Whether to dump extra info on output or not; intended for debugging") + flag.Parse() + + verbose = *verboseFlag + anonymousGitHubClient = github.NewClient(nil) + + if *listChangedFilesFlag { + listChangedFiles() + } +} + diff --git a/vendor/github.com/knative/test-infra/tools/testgrid/testgrid.go b/vendor/github.com/knative/test-infra/tools/testgrid/testgrid.go new file mode 100644 index 00000000000..30d7ff2c13c --- /dev/null +++ b/vendor/github.com/knative/test-infra/tools/testgrid/testgrid.go @@ -0,0 +1,69 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// testgrid.go provides methods to perform action on testgrid. 
+ +package testgrid + +import ( + "encoding/xml" + "os" +) + +// TestProperty defines a property of the test +type TestProperty struct { + Name string `xml:"name,attr"` + Value float32 `xml:"value,attr"` +} + +// TestProperties is an array of test properties +type TestProperties struct { + Property []TestProperty `xml:"property"` +} + +// TestCase defines a test case that was executed +type TestCase struct { + ClassName string `xml:"class_name,attr"` + Name string `xml:"name,attr"` + Time int `xml:"time,attr"` + Properties TestProperties `xml:"properties"` + Fail bool `xml:"failure,omitempty"` +} + +// TestSuite defines the set of relevant test cases +type TestSuite struct { + XMLName xml.Name `xml:"testsuite"` + TestCases []TestCase `xml:"testcase"` +} + +// CreateXMLOutput creates the junit xml file in the provided artifacts directory +func CreateXMLOutput(ts TestSuite, artifactsDir string) error { + op, err := xml.MarshalIndent(ts, "", " ") + if err != nil { + return err + } + + outputFile := artifactsDir + "/junit_bazel.xml" + f, err := os.Create(outputFile) + if err != nil { + return err + } + defer f.Close() + if _, err := f.WriteString(string(op) + "\n"); err != nil { + return err + } + return nil +} From 240c125457e22094de95f8c881551d60608d0d3d Mon Sep 17 00:00:00 2001 From: Sabari Kumar Murugesan Date: Tue, 6 Nov 2018 15:24:18 -0800 Subject: [PATCH 18/20] Fix conflicts and unit tests --- Gopkg.lock | 52 + Gopkg.toml | 4 - .../kafka/controller/channel/provider.go | 23 +- .../kafka/controller/channel/reconcile.go | 13 +- .../controller/channel/reconcile_test.go | 40 +- third_party/VENDOR-LICENSE | 209 ++ vendor/github.com/Shopify/sarama/LICENSE | 20 + .../github.com/Shopify/sarama/acl_bindings.go | 119 + .../Shopify/sarama/acl_create_request.go | 76 + .../Shopify/sarama/acl_create_response.go | 88 + .../Shopify/sarama/acl_delete_request.go | 48 + .../Shopify/sarama/acl_delete_response.go | 155 ++ .../Shopify/sarama/acl_describe_request.go | 25 + 
.../Shopify/sarama/acl_describe_response.go | 80 + .../github.com/Shopify/sarama/acl_filter.go | 61 + vendor/github.com/Shopify/sarama/acl_types.go | 42 + .../sarama/add_offsets_to_txn_request.go | 52 + .../sarama/add_offsets_to_txn_response.go | 44 + .../sarama/add_partitions_to_txn_request.go | 76 + .../sarama/add_partitions_to_txn_response.go | 108 + .../Shopify/sarama/alter_configs_request.go | 120 + .../Shopify/sarama/alter_configs_response.go | 95 + .../Shopify/sarama/api_versions_request.go | 24 + .../Shopify/sarama/api_versions_response.go | 87 + .../Shopify/sarama/async_producer.go | 932 +++++++ .../Shopify/sarama/config_resource_type.go | 15 + vendor/github.com/Shopify/sarama/consumer.go | 807 ++++++ .../Shopify/sarama/consumer_group_members.go | 94 + .../sarama/consumer_metadata_request.go | 33 + .../sarama/consumer_metadata_response.go | 77 + .../github.com/Shopify/sarama/crc32_field.go | 69 + .../sarama/create_partitions_request.go | 121 + .../sarama/create_partitions_response.go | 94 + .../Shopify/sarama/create_topics_request.go | 174 ++ .../Shopify/sarama/create_topics_response.go | 112 + .../Shopify/sarama/delete_groups_request.go | 30 + .../Shopify/sarama/delete_groups_response.go | 70 + .../Shopify/sarama/delete_records_request.go | 126 + .../Shopify/sarama/delete_records_response.go | 158 ++ .../Shopify/sarama/delete_topics_request.go | 48 + .../Shopify/sarama/delete_topics_response.go | 78 + .../sarama/describe_configs_request.go | 91 + .../sarama/describe_configs_response.go | 188 ++ .../Shopify/sarama/describe_groups_request.go | 30 + .../sarama/describe_groups_response.go | 187 ++ .../Shopify/sarama/encoder_decoder.go | 89 + .../Shopify/sarama/end_txn_request.go | 50 + .../Shopify/sarama/end_txn_response.go | 44 + vendor/github.com/Shopify/sarama/errors.go | 281 +++ .../Shopify/sarama/fetch_request.go | 170 ++ .../sarama/find_coordinator_request.go | 61 + .../sarama/find_coordinator_response.go | 92 + .../Shopify/sarama/heartbeat_request.go | 
47 + .../Shopify/sarama/heartbeat_response.go | 32 + .../sarama/init_producer_id_request.go | 43 + .../sarama/init_producer_id_response.go | 55 + .../Shopify/sarama/join_group_request.go | 163 ++ .../Shopify/sarama/join_group_response.go | 135 + .../Shopify/sarama/leave_group_request.go | 40 + .../Shopify/sarama/leave_group_response.go | 32 + .../Shopify/sarama/list_groups_request.go | 24 + .../Shopify/sarama/list_groups_response.go | 69 + vendor/github.com/Shopify/sarama/message.go | 223 ++ vendor/github.com/Shopify/sarama/metrics.go | 51 + .../github.com/Shopify/sarama/mockbroker.go | 330 +++ .../Shopify/sarama/mockresponses.go | 727 ++++++ .../Shopify/sarama/offset_commit_request.go | 204 ++ .../Shopify/sarama/offset_commit_response.go | 85 + .../Shopify/sarama/offset_fetch_request.go | 81 + .../Shopify/sarama/offset_fetch_response.go | 143 ++ .../Shopify/sarama/offset_request.go | 132 + .../Shopify/sarama/offset_response.go | 174 ++ .../Shopify/sarama/packet_decoder.go | 60 + .../Shopify/sarama/packet_encoder.go | 65 + .../github.com/Shopify/sarama/partitioner.go | 217 ++ .../github.com/Shopify/sarama/prep_encoder.go | 153 ++ .../Shopify/sarama/produce_request.go | 252 ++ .../Shopify/sarama/produce_response.go | 183 ++ .../github.com/Shopify/sarama/produce_set.go | 252 ++ .../github.com/Shopify/sarama/real_decoder.go | 324 +++ .../github.com/Shopify/sarama/real_encoder.go | 156 ++ vendor/github.com/Shopify/sarama/record.go | 113 + .../github.com/Shopify/sarama/record_batch.go | 268 ++ vendor/github.com/Shopify/sarama/request.go | 149 ++ .../Shopify/sarama/response_header.go | 21 + vendor/github.com/Shopify/sarama/sarama.go | 99 + .../Shopify/sarama/sasl_handshake_request.go | 33 + .../Shopify/sarama/sasl_handshake_response.go | 38 + .../Shopify/sarama/sync_group_request.go | 100 + .../Shopify/sarama/sync_group_response.go | 41 + .../Shopify/sarama/sync_producer.go | 149 ++ vendor/github.com/Shopify/sarama/timestamp.go | 40 + 
.../sarama/txn_offset_commit_request.go | 126 + .../sarama/txn_offset_commit_response.go | 83 + .../github.com/eapache/go-resiliency/LICENSE | 22 + .../eapache/go-resiliency/breaker/breaker.go | 161 ++ .../eapache/go-xerial-snappy/LICENSE | 21 + .../eapache/go-xerial-snappy/fuzz.go | 16 + .../eapache/go-xerial-snappy/snappy.go | 131 + vendor/github.com/eapache/queue/LICENSE | 21 + vendor/github.com/eapache/queue/queue.go | 102 + vendor/github.com/golang/snappy/AUTHORS | 15 + vendor/github.com/golang/snappy/CONTRIBUTORS | 37 + vendor/github.com/golang/snappy/LICENSE | 27 + vendor/github.com/golang/snappy/decode.go | 237 ++ .../github.com/golang/snappy/decode_amd64.go | 14 + .../github.com/golang/snappy/decode_amd64.s | 490 ++++ .../github.com/golang/snappy/decode_other.go | 101 + vendor/github.com/golang/snappy/encode.go | 285 +++ .../github.com/golang/snappy/encode_amd64.go | 29 + .../github.com/golang/snappy/encode_amd64.s | 730 ++++++ .../github.com/golang/snappy/encode_other.go | 238 ++ vendor/github.com/golang/snappy/snappy.go | 98 + .../.github/pull-request-template.md | 7 - .../knative/test-infra/CONTRIBUTING.md | 3 - .../github.com/knative/test-infra/Gopkg.lock | 28 - .../github.com/knative/test-infra/Gopkg.toml | 14 - vendor/github.com/knative/test-infra/LICENSE | 202 -- vendor/github.com/knative/test-infra/OWNERS | 7 - .../github.com/knative/test-infra/README.md | 17 - .../github.com/knative/test-infra/WORKSPACE | 52 - .../knative/test-infra/ci/README.md | 3 - .../knative/test-infra/ci/gubernator/Makefile | 33 - .../test-infra/ci/gubernator/README.md | 7 - .../test-infra/ci/gubernator/config.yaml | 71 - .../test-infra/ci/gubernator/redir_github.py | 25 - .../knative/test-infra/ci/prow/Makefile | 42 - .../knative/test-infra/ci/prow/README.md | 10 - .../test-infra/ci/prow/boskos/README.md | 6 - .../test-infra/ci/prow/boskos/config.yaml | 152 -- .../ci/prow/boskos/config_start.yaml | 23 - .../test-infra/ci/prow/boskos/resources.yaml | 38 - 
.../knative/test-infra/ci/prow/cluster.yaml | 350 --- .../knative/test-infra/ci/prow/config.yaml | 2211 ----------------- .../test-infra/ci/prow/config_start.yaml | 339 --- .../knative/test-infra/ci/prow/plugins.yaml | 41 - .../knative/test-infra/ci/prow/prow_setup.md | 71 - .../knative/test-infra/ci/testgrid/Makefile | 29 - .../knative/test-infra/ci/testgrid/README.md | 6 - .../test-infra/ci/testgrid/config.yaml | 213 -- vendor/github.com/knative/test-infra/dummy.go | 10 - .../knative/test-infra/images/README.md | 3 - .../test-infra/images/apicoverage/Dockerfile | 20 - .../test-infra/images/apicoverage/Makefile | 23 - .../test-infra/images/apicoverage/README.md | 3 - .../test-infra/images/prow-tests/Dockerfile | 56 - .../test-infra/images/prow-tests/Makefile | 34 - .../test-infra/images/prow-tests/README.md | 13 - .../knative/test-infra/test/e2e-tests.sh | 50 - .../test-infra/test/presubmit-tests.sh | 49 - .../test/unit/e2e-custom-flag-tests.sh | 38 - .../test-infra/test/unit/library-tests.sh | 50 - ...presubmit-full-custom-integration-tests.sh | 28 - .../presubmit-integration-tests-common.sh | 48 - ...submit-partial-custom-integration-tests.sh | 33 - .../test-infra/test/unit/release-tests.sh | 73 - .../knative/test-infra/tools/README.md | 3 - .../test-infra/tools/apicoverage/README.md | 14 - .../tools/apicoverage/apicoverage.go | 241 -- .../test-infra/tools/dep-collector/README.md | 88 - .../test-infra/tools/dep-collector/imports.go | 94 - .../tools/dep-collector/licenses.go | 203 -- .../test-infra/tools/dep-collector/main.go | 81 - .../knative/test-infra/tools/gcs/gcs.go | 112 - .../test-infra/tools/githubhelper/Makefile | 17 - .../test-infra/tools/githubhelper/README.md | 10 - .../tools/githubhelper/githubhelper.go | 85 - .../test-infra/tools/testgrid/testgrid.go | 69 - vendor/github.com/pierrec/lz4/LICENSE | 28 + vendor/github.com/pierrec/lz4/block.go | 397 +++ vendor/github.com/pierrec/lz4/debug.go | 23 + vendor/github.com/pierrec/lz4/debug_stub.go | 7 + 
.../pierrec/lz4/internal/xxh32/xxh32zero.go | 222 ++ vendor/github.com/pierrec/lz4/lz4.go | 68 + vendor/github.com/pierrec/lz4/lz4_go1.10.go | 29 + .../github.com/pierrec/lz4/lz4_notgo1.10.go | 29 + vendor/github.com/pierrec/lz4/reader.go | 295 +++ vendor/github.com/pierrec/lz4/writer.go | 267 ++ vendor/github.com/rcrowley/go-metrics/LICENSE | 29 + .../github.com/rcrowley/go-metrics/counter.go | 112 + .../github.com/rcrowley/go-metrics/debug.go | 76 + vendor/github.com/rcrowley/go-metrics/ewma.go | 138 + .../github.com/rcrowley/go-metrics/gauge.go | 120 + .../rcrowley/go-metrics/gauge_float64.go | 125 + .../rcrowley/go-metrics/graphite.go | 113 + .../rcrowley/go-metrics/healthcheck.go | 61 + .../rcrowley/go-metrics/histogram.go | 202 ++ vendor/github.com/rcrowley/go-metrics/json.go | 31 + vendor/github.com/rcrowley/go-metrics/log.go | 80 + .../github.com/rcrowley/go-metrics/meter.go | 251 ++ .../github.com/rcrowley/go-metrics/metrics.go | 13 + .../rcrowley/go-metrics/opentsdb.go | 119 + .../rcrowley/go-metrics/registry.go | 363 +++ .../github.com/rcrowley/go-metrics/runtime.go | 212 ++ .../rcrowley/go-metrics/runtime_cgo.go | 10 + .../go-metrics/runtime_gccpufraction.go | 9 + .../rcrowley/go-metrics/runtime_no_cgo.go | 7 + .../go-metrics/runtime_no_gccpufraction.go | 9 + .../github.com/rcrowley/go-metrics/sample.go | 616 +++++ .../github.com/rcrowley/go-metrics/syslog.go | 78 + .../github.com/rcrowley/go-metrics/timer.go | 329 +++ .../github.com/rcrowley/go-metrics/writer.go | 100 + 202 files changed, 18997 insertions(+), 5588 deletions(-) create mode 100644 vendor/github.com/Shopify/sarama/LICENSE create mode 100644 vendor/github.com/Shopify/sarama/acl_bindings.go create mode 100644 vendor/github.com/Shopify/sarama/acl_create_request.go create mode 100644 vendor/github.com/Shopify/sarama/acl_create_response.go create mode 100644 vendor/github.com/Shopify/sarama/acl_delete_request.go create mode 100644 vendor/github.com/Shopify/sarama/acl_delete_response.go create 
mode 100644 vendor/github.com/Shopify/sarama/acl_describe_request.go create mode 100644 vendor/github.com/Shopify/sarama/acl_describe_response.go create mode 100644 vendor/github.com/Shopify/sarama/acl_filter.go create mode 100644 vendor/github.com/Shopify/sarama/acl_types.go create mode 100644 vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go create mode 100644 vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go create mode 100644 vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go create mode 100644 vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go create mode 100644 vendor/github.com/Shopify/sarama/alter_configs_request.go create mode 100644 vendor/github.com/Shopify/sarama/alter_configs_response.go create mode 100644 vendor/github.com/Shopify/sarama/api_versions_request.go create mode 100644 vendor/github.com/Shopify/sarama/api_versions_response.go create mode 100644 vendor/github.com/Shopify/sarama/async_producer.go create mode 100644 vendor/github.com/Shopify/sarama/config_resource_type.go create mode 100644 vendor/github.com/Shopify/sarama/consumer.go create mode 100644 vendor/github.com/Shopify/sarama/consumer_group_members.go create mode 100644 vendor/github.com/Shopify/sarama/consumer_metadata_request.go create mode 100644 vendor/github.com/Shopify/sarama/consumer_metadata_response.go create mode 100644 vendor/github.com/Shopify/sarama/crc32_field.go create mode 100644 vendor/github.com/Shopify/sarama/create_partitions_request.go create mode 100644 vendor/github.com/Shopify/sarama/create_partitions_response.go create mode 100644 vendor/github.com/Shopify/sarama/create_topics_request.go create mode 100644 vendor/github.com/Shopify/sarama/create_topics_response.go create mode 100644 vendor/github.com/Shopify/sarama/delete_groups_request.go create mode 100644 vendor/github.com/Shopify/sarama/delete_groups_response.go create mode 100644 vendor/github.com/Shopify/sarama/delete_records_request.go create 
mode 100644 vendor/github.com/Shopify/sarama/delete_records_response.go create mode 100644 vendor/github.com/Shopify/sarama/delete_topics_request.go create mode 100644 vendor/github.com/Shopify/sarama/delete_topics_response.go create mode 100644 vendor/github.com/Shopify/sarama/describe_configs_request.go create mode 100644 vendor/github.com/Shopify/sarama/describe_configs_response.go create mode 100644 vendor/github.com/Shopify/sarama/describe_groups_request.go create mode 100644 vendor/github.com/Shopify/sarama/describe_groups_response.go create mode 100644 vendor/github.com/Shopify/sarama/encoder_decoder.go create mode 100644 vendor/github.com/Shopify/sarama/end_txn_request.go create mode 100644 vendor/github.com/Shopify/sarama/end_txn_response.go create mode 100644 vendor/github.com/Shopify/sarama/errors.go create mode 100644 vendor/github.com/Shopify/sarama/fetch_request.go create mode 100644 vendor/github.com/Shopify/sarama/find_coordinator_request.go create mode 100644 vendor/github.com/Shopify/sarama/find_coordinator_response.go create mode 100644 vendor/github.com/Shopify/sarama/heartbeat_request.go create mode 100644 vendor/github.com/Shopify/sarama/heartbeat_response.go create mode 100644 vendor/github.com/Shopify/sarama/init_producer_id_request.go create mode 100644 vendor/github.com/Shopify/sarama/init_producer_id_response.go create mode 100644 vendor/github.com/Shopify/sarama/join_group_request.go create mode 100644 vendor/github.com/Shopify/sarama/join_group_response.go create mode 100644 vendor/github.com/Shopify/sarama/leave_group_request.go create mode 100644 vendor/github.com/Shopify/sarama/leave_group_response.go create mode 100644 vendor/github.com/Shopify/sarama/list_groups_request.go create mode 100644 vendor/github.com/Shopify/sarama/list_groups_response.go create mode 100644 vendor/github.com/Shopify/sarama/message.go create mode 100644 vendor/github.com/Shopify/sarama/metrics.go create mode 100644 
vendor/github.com/Shopify/sarama/mockbroker.go create mode 100644 vendor/github.com/Shopify/sarama/mockresponses.go create mode 100644 vendor/github.com/Shopify/sarama/offset_commit_request.go create mode 100644 vendor/github.com/Shopify/sarama/offset_commit_response.go create mode 100644 vendor/github.com/Shopify/sarama/offset_fetch_request.go create mode 100644 vendor/github.com/Shopify/sarama/offset_fetch_response.go create mode 100644 vendor/github.com/Shopify/sarama/offset_request.go create mode 100644 vendor/github.com/Shopify/sarama/offset_response.go create mode 100644 vendor/github.com/Shopify/sarama/packet_decoder.go create mode 100644 vendor/github.com/Shopify/sarama/packet_encoder.go create mode 100644 vendor/github.com/Shopify/sarama/partitioner.go create mode 100644 vendor/github.com/Shopify/sarama/prep_encoder.go create mode 100644 vendor/github.com/Shopify/sarama/produce_request.go create mode 100644 vendor/github.com/Shopify/sarama/produce_response.go create mode 100644 vendor/github.com/Shopify/sarama/produce_set.go create mode 100644 vendor/github.com/Shopify/sarama/real_decoder.go create mode 100644 vendor/github.com/Shopify/sarama/real_encoder.go create mode 100644 vendor/github.com/Shopify/sarama/record.go create mode 100644 vendor/github.com/Shopify/sarama/record_batch.go create mode 100644 vendor/github.com/Shopify/sarama/request.go create mode 100644 vendor/github.com/Shopify/sarama/response_header.go create mode 100644 vendor/github.com/Shopify/sarama/sarama.go create mode 100644 vendor/github.com/Shopify/sarama/sasl_handshake_request.go create mode 100644 vendor/github.com/Shopify/sarama/sasl_handshake_response.go create mode 100644 vendor/github.com/Shopify/sarama/sync_group_request.go create mode 100644 vendor/github.com/Shopify/sarama/sync_group_response.go create mode 100644 vendor/github.com/Shopify/sarama/sync_producer.go create mode 100644 vendor/github.com/Shopify/sarama/timestamp.go create mode 100644 
vendor/github.com/Shopify/sarama/txn_offset_commit_request.go create mode 100644 vendor/github.com/Shopify/sarama/txn_offset_commit_response.go create mode 100644 vendor/github.com/eapache/go-resiliency/LICENSE create mode 100644 vendor/github.com/eapache/go-resiliency/breaker/breaker.go create mode 100644 vendor/github.com/eapache/go-xerial-snappy/LICENSE create mode 100644 vendor/github.com/eapache/go-xerial-snappy/fuzz.go create mode 100644 vendor/github.com/eapache/go-xerial-snappy/snappy.go create mode 100644 vendor/github.com/eapache/queue/LICENSE create mode 100644 vendor/github.com/eapache/queue/queue.go create mode 100644 vendor/github.com/golang/snappy/AUTHORS create mode 100644 vendor/github.com/golang/snappy/CONTRIBUTORS create mode 100644 vendor/github.com/golang/snappy/LICENSE create mode 100644 vendor/github.com/golang/snappy/decode.go create mode 100644 vendor/github.com/golang/snappy/decode_amd64.go create mode 100644 vendor/github.com/golang/snappy/decode_amd64.s create mode 100644 vendor/github.com/golang/snappy/decode_other.go create mode 100644 vendor/github.com/golang/snappy/encode.go create mode 100644 vendor/github.com/golang/snappy/encode_amd64.go create mode 100644 vendor/github.com/golang/snappy/encode_amd64.s create mode 100644 vendor/github.com/golang/snappy/encode_other.go create mode 100644 vendor/github.com/golang/snappy/snappy.go delete mode 100644 vendor/github.com/knative/test-infra/.github/pull-request-template.md delete mode 100644 vendor/github.com/knative/test-infra/CONTRIBUTING.md delete mode 100644 vendor/github.com/knative/test-infra/Gopkg.lock delete mode 100644 vendor/github.com/knative/test-infra/Gopkg.toml delete mode 100644 vendor/github.com/knative/test-infra/LICENSE delete mode 100644 vendor/github.com/knative/test-infra/OWNERS delete mode 100644 vendor/github.com/knative/test-infra/README.md delete mode 100644 vendor/github.com/knative/test-infra/WORKSPACE delete mode 100644 
vendor/github.com/knative/test-infra/ci/README.md delete mode 100644 vendor/github.com/knative/test-infra/ci/gubernator/Makefile delete mode 100644 vendor/github.com/knative/test-infra/ci/gubernator/README.md delete mode 100644 vendor/github.com/knative/test-infra/ci/gubernator/config.yaml delete mode 100644 vendor/github.com/knative/test-infra/ci/gubernator/redir_github.py delete mode 100644 vendor/github.com/knative/test-infra/ci/prow/Makefile delete mode 100644 vendor/github.com/knative/test-infra/ci/prow/README.md delete mode 100644 vendor/github.com/knative/test-infra/ci/prow/boskos/README.md delete mode 100644 vendor/github.com/knative/test-infra/ci/prow/boskos/config.yaml delete mode 100644 vendor/github.com/knative/test-infra/ci/prow/boskos/config_start.yaml delete mode 100644 vendor/github.com/knative/test-infra/ci/prow/boskos/resources.yaml delete mode 100644 vendor/github.com/knative/test-infra/ci/prow/cluster.yaml delete mode 100644 vendor/github.com/knative/test-infra/ci/prow/config.yaml delete mode 100644 vendor/github.com/knative/test-infra/ci/prow/config_start.yaml delete mode 100644 vendor/github.com/knative/test-infra/ci/prow/plugins.yaml delete mode 100644 vendor/github.com/knative/test-infra/ci/prow/prow_setup.md delete mode 100644 vendor/github.com/knative/test-infra/ci/testgrid/Makefile delete mode 100644 vendor/github.com/knative/test-infra/ci/testgrid/README.md delete mode 100644 vendor/github.com/knative/test-infra/ci/testgrid/config.yaml delete mode 100644 vendor/github.com/knative/test-infra/dummy.go delete mode 100644 vendor/github.com/knative/test-infra/images/README.md delete mode 100644 vendor/github.com/knative/test-infra/images/apicoverage/Dockerfile delete mode 100644 vendor/github.com/knative/test-infra/images/apicoverage/Makefile delete mode 100644 vendor/github.com/knative/test-infra/images/apicoverage/README.md delete mode 100644 vendor/github.com/knative/test-infra/images/prow-tests/Dockerfile delete mode 100644 
vendor/github.com/knative/test-infra/images/prow-tests/Makefile delete mode 100644 vendor/github.com/knative/test-infra/images/prow-tests/README.md delete mode 100755 vendor/github.com/knative/test-infra/test/e2e-tests.sh delete mode 100755 vendor/github.com/knative/test-infra/test/presubmit-tests.sh delete mode 100755 vendor/github.com/knative/test-infra/test/unit/e2e-custom-flag-tests.sh delete mode 100755 vendor/github.com/knative/test-infra/test/unit/library-tests.sh delete mode 100755 vendor/github.com/knative/test-infra/test/unit/presubmit-full-custom-integration-tests.sh delete mode 100755 vendor/github.com/knative/test-infra/test/unit/presubmit-integration-tests-common.sh delete mode 100755 vendor/github.com/knative/test-infra/test/unit/presubmit-partial-custom-integration-tests.sh delete mode 100755 vendor/github.com/knative/test-infra/test/unit/release-tests.sh delete mode 100644 vendor/github.com/knative/test-infra/tools/README.md delete mode 100644 vendor/github.com/knative/test-infra/tools/apicoverage/README.md delete mode 100644 vendor/github.com/knative/test-infra/tools/apicoverage/apicoverage.go delete mode 100644 vendor/github.com/knative/test-infra/tools/dep-collector/README.md delete mode 100644 vendor/github.com/knative/test-infra/tools/dep-collector/imports.go delete mode 100644 vendor/github.com/knative/test-infra/tools/dep-collector/licenses.go delete mode 100644 vendor/github.com/knative/test-infra/tools/dep-collector/main.go delete mode 100644 vendor/github.com/knative/test-infra/tools/gcs/gcs.go delete mode 100644 vendor/github.com/knative/test-infra/tools/githubhelper/Makefile delete mode 100644 vendor/github.com/knative/test-infra/tools/githubhelper/README.md delete mode 100644 vendor/github.com/knative/test-infra/tools/githubhelper/githubhelper.go delete mode 100644 vendor/github.com/knative/test-infra/tools/testgrid/testgrid.go create mode 100644 vendor/github.com/pierrec/lz4/LICENSE create mode 100644 
vendor/github.com/pierrec/lz4/block.go create mode 100644 vendor/github.com/pierrec/lz4/debug.go create mode 100644 vendor/github.com/pierrec/lz4/debug_stub.go create mode 100644 vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go create mode 100644 vendor/github.com/pierrec/lz4/lz4.go create mode 100644 vendor/github.com/pierrec/lz4/lz4_go1.10.go create mode 100644 vendor/github.com/pierrec/lz4/lz4_notgo1.10.go create mode 100644 vendor/github.com/pierrec/lz4/reader.go create mode 100644 vendor/github.com/pierrec/lz4/writer.go create mode 100644 vendor/github.com/rcrowley/go-metrics/LICENSE create mode 100644 vendor/github.com/rcrowley/go-metrics/counter.go create mode 100644 vendor/github.com/rcrowley/go-metrics/debug.go create mode 100644 vendor/github.com/rcrowley/go-metrics/ewma.go create mode 100644 vendor/github.com/rcrowley/go-metrics/gauge.go create mode 100644 vendor/github.com/rcrowley/go-metrics/gauge_float64.go create mode 100644 vendor/github.com/rcrowley/go-metrics/graphite.go create mode 100644 vendor/github.com/rcrowley/go-metrics/healthcheck.go create mode 100644 vendor/github.com/rcrowley/go-metrics/histogram.go create mode 100644 vendor/github.com/rcrowley/go-metrics/json.go create mode 100644 vendor/github.com/rcrowley/go-metrics/log.go create mode 100644 vendor/github.com/rcrowley/go-metrics/meter.go create mode 100644 vendor/github.com/rcrowley/go-metrics/metrics.go create mode 100644 vendor/github.com/rcrowley/go-metrics/opentsdb.go create mode 100644 vendor/github.com/rcrowley/go-metrics/registry.go create mode 100644 vendor/github.com/rcrowley/go-metrics/runtime.go create mode 100644 vendor/github.com/rcrowley/go-metrics/runtime_cgo.go create mode 100644 vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go create mode 100644 vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go create mode 100644 vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go create mode 100644 
vendor/github.com/rcrowley/go-metrics/sample.go create mode 100644 vendor/github.com/rcrowley/go-metrics/syslog.go create mode 100644 vendor/github.com/rcrowley/go-metrics/timer.go create mode 100644 vendor/github.com/rcrowley/go-metrics/writer.go diff --git a/Gopkg.lock b/Gopkg.lock index 59893620880..0d8c4de6882 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -32,6 +32,30 @@ revision = "346938d642f2ec3594ed81d874461961cd0faa76" version = "v1.1.0" +[[projects]] + digest = "1:08143362be979b087c2c1bae5dde986e988d3d5d4dc661727cbe436411b3f33a" + name = "github.com/eapache/go-resiliency" + packages = ["breaker"] + pruneopts = "NUT" + revision = "ea41b0fad31007accc7f806884dcdf3da98b79ce" + version = "v1.1.0" + +[[projects]] + branch = "master" + digest = "1:d3430c048e919ed27813d20dc65a32d4e3bae3ad05b83700e244a81eaaf48e2a" + name = "github.com/eapache/go-xerial-snappy" + packages = ["."] + pruneopts = "NUT" + revision = "776d5712da21bc4762676d614db1d8a64f4238b0" + +[[projects]] + digest = "1:0d36a2b325b9e75f8057f7f9fbe778d348d70ba652cb9335485b69d1a5c4e038" + name = "github.com/eapache/queue" + packages = ["."] + pruneopts = "NUT" + revision = "44cc805cf13205b55f69e14bcb69867d1ae92f98" + version = "v1.1.0" + [[projects]] digest = "1:32598368f409bbee79deb9d43569fcd92b9fb27f39155f5e166b3371217f051f" name = "github.com/evanphx/json-patch" @@ -121,6 +145,14 @@ revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265" version = "v1.1.0" +[[projects]] + branch = "master" + digest = "1:7f114b78210bf5b75f307fc97cff293633c835bab1e0ea8a744a44b39c042dfe" + name = "github.com/golang/snappy" + packages = ["."] + pruneopts = "NUT" + revision = "2e65f85255dbc3072edf28d6b5b8efc472979f5a" + [[projects]] branch = "master" digest = "1:245bd4eb633039cd66106a5d340ae826d87f4e36a8602fcc940e14176fd26ea7" @@ -366,6 +398,17 @@ revision = "5f041e8faa004a95c88a202771f4cc3e991971e6" version = "v2.0.1" +[[projects]] + digest = "1:1d920dce8e11bfff65b5709e883a8ece131b63a5bc4b2cd404f9ef7eb445f73f" + name = 
"github.com/pierrec/lz4" + packages = [ + ".", + "internal/xxh32", + ] + pruneopts = "NUT" + revision = "635575b42742856941dbc767b44905bb9ba083f6" + version = "v2.0.7" + [[projects]] digest = "1:03bca087b180bf24c4f9060775f137775550a0834e18f0bca0520a868679dbd7" name = "github.com/prometheus/client_golang" @@ -410,6 +453,14 @@ pruneopts = "NUT" revision = "94663424ae5ae9856b40a9f170762b4197024661" +[[projects]] + branch = "master" + digest = "1:120b256a4d3cd2946ffa4b87102731c2f004aed6d836dc2fba400ed9398696e7" + name = "github.com/rcrowley/go-metrics" + packages = ["."] + pruneopts = "NUT" + revision = "3113b8401b8a98917cde58f8bbd42a1b1c03b1fd" + [[projects]] digest = "1:15e5c398fbd9d2c439b635a08ac161b13d04f0c2aa587fe256b65dc0c3efe8b7" name = "github.com/spf13/pflag" @@ -927,6 +978,7 @@ analyzer-name = "dep" analyzer-version = 1 input-imports = [ + "github.com/Shopify/sarama", "github.com/fsnotify/fsnotify", "github.com/golang/glog", "github.com/google/go-cmp/cmp", diff --git a/Gopkg.toml b/Gopkg.toml index a7bcdfccaa9..9ea4d08a427 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -83,10 +83,6 @@ required = [ name = "github.com/Shopify/sarama" version = "1.19.0" -[[constraint]] - name = "github.com/bsm/sarama-cluster" - version = "2.1.13" - [[constraint]] name = "sigs.k8s.io/controller-runtime" # HEAD as of 2018-09-19 diff --git a/pkg/provisioners/kafka/controller/channel/provider.go b/pkg/provisioners/kafka/controller/channel/provider.go index 7ffdfcd21b1..b386576ee9f 100644 --- a/pkg/provisioners/kafka/controller/channel/provider.go +++ b/pkg/provisioners/kafka/controller/channel/provider.go @@ -17,8 +17,7 @@ limitations under the License. 
package channel import ( - "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" - common "github.com/knative/eventing/pkg/provisioners/kafka/controller" + "github.com/Shopify/sarama" "go.uber.org/zap" "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" @@ -27,6 +26,9 @@ import ( "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" + + "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" + common "github.com/knative/eventing/pkg/provisioners/kafka/controller" ) const ( @@ -36,10 +38,13 @@ const ( ) type reconciler struct { - client client.Client - recorder record.EventRecorder - logger *zap.Logger - config *common.KafkaProvisionerConfig + client client.Client + recorder record.EventRecorder + logger *zap.Logger + config *common.KafkaProvisionerConfig + // Using a shared kafkaClusterAdmin does not work currently because of an issue with + // Shopify/sarama, see https://github.com/Shopify/sarama/issues/1162. + kafkaClusterAdmin sarama.ClusterAdmin } // Verify the struct implements reconcile.Reconciler @@ -50,9 +55,9 @@ func ProvideController(mgr manager.Manager, config *common.KafkaProvisionerConfi // Setup a new controller to Reconcile Channel. 
c, err := controller.New(controllerAgentName, mgr, controller.Options{ Reconciler: &reconciler{ - recorder: mgr.GetRecorder(controllerAgentName), - logger: logger, - config: config, + recorder: mgr.GetRecorder(controllerAgentName), + logger: logger, + config: config, }, }) if err != nil { diff --git a/pkg/provisioners/kafka/controller/channel/reconcile.go b/pkg/provisioners/kafka/controller/channel/reconcile.go index 8e19a5875ce..118de57b998 100644 --- a/pkg/provisioners/kafka/controller/channel/reconcile.go +++ b/pkg/provisioners/kafka/controller/channel/reconcile.go @@ -105,10 +105,13 @@ func (r *reconciler) reconcile(channel *v1alpha1.Channel) error { return err } - kafkaClusterAdmin, err := createKafkaAdminClient(r.config) - if err != nil { - r.logger.Fatal("unable to build kafka admin client", zap.Error(err)) - return err + kafkaClusterAdmin := r.kafkaClusterAdmin + if kafkaClusterAdmin == nil { + kafkaClusterAdmin, err = createKafkaAdminClient(r.config) + if err != nil { + r.logger.Fatal("unable to build kafka admin client", zap.Error(err)) + return err + } } deletionTimestamp := accessor.GetDeletionTimestamp() @@ -130,7 +133,7 @@ func (r *reconciler) reconcile(channel *v1alpha1.Channel) error { channel.Status.MarkProvisioned() // close the connection - kafkaClusterAdmin.Close(); + kafkaClusterAdmin.Close() return nil } diff --git a/pkg/provisioners/kafka/controller/channel/reconcile_test.go b/pkg/provisioners/kafka/controller/channel/reconcile_test.go index f2abf05ccb2..224d6b335a4 100644 --- a/pkg/provisioners/kafka/controller/channel/reconcile_test.go +++ b/pkg/provisioners/kafka/controller/channel/reconcile_test.go @@ -314,15 +314,15 @@ func TestProvisionChannel(t *testing.T) { logger := provisioners.NewProvisionerLoggerFromConfig(provisioners.NewLoggingConfig()) r := &reconciler{ logger: logger.Desugar(), - kafkaClusterAdmin: &mockClusterAdmin{ - mockCreateTopicFunc: func(topic string, detail *sarama.TopicDetail, validateOnly bool) error { - if topic != 
tc.wantTopicName { - t.Errorf("expected topic name: %+v got: %+v", tc.wantTopicName, topic) - } - return tc.mockError - }}, } - err := r.provisionChannel(tc.c) + kafkaClusterAdmin := &mockClusterAdmin{ + mockCreateTopicFunc: func(topic string, detail *sarama.TopicDetail, validateOnly bool) error { + if topic != tc.wantTopicName { + t.Errorf("expected topic name: %+v got: %+v", tc.wantTopicName, topic) + } + return tc.mockError + }} + err := r.provisionChannel(tc.c, kafkaClusterAdmin) var got string if err != nil { got = err.Error() @@ -364,16 +364,16 @@ func TestDeprovisionChannel(t *testing.T) { t.Logf("running test %s", tc.name) logger := provisioners.NewProvisionerLoggerFromConfig(provisioners.NewLoggingConfig()) r := &reconciler{ - logger: logger.Desugar(), - kafkaClusterAdmin: &mockClusterAdmin{ - mockDeleteTopicFunc: func(topic string) error { - if topic != tc.wantTopicName { - t.Errorf("expected topic name: %+v got: %+v", tc.wantTopicName, topic) - } - return tc.mockError - }}, - } - err := r.deprovisionChannel(tc.c) + logger: logger.Desugar()} + kafkaClusterAdmin := &mockClusterAdmin{ + mockDeleteTopicFunc: func(topic string) error { + if topic != tc.wantTopicName { + t.Errorf("expected topic name: %+v got: %+v", tc.wantTopicName, topic) + } + return tc.mockError + }} + + err := r.deprovisionChannel(tc.c, kafkaClusterAdmin) var got string if err != nil { got = err.Error() @@ -469,10 +469,6 @@ func getNewClusterChannelProvisioner(name string, isReady bool) *eventingv1alpha Type: eventingv1alpha1.ClusterChannelProvisionerConditionReady, Status: condStatus, }, - { - Type: eventingv1alpha1.ChannelConditionSinkable, - Status: "Unknown", - }, }, }, } diff --git a/third_party/VENDOR-LICENSE b/third_party/VENDOR-LICENSE index a6e32d48afd..a4b76f23095 100644 --- a/third_party/VENDOR-LICENSE +++ b/third_party/VENDOR-LICENSE @@ -207,6 +207,32 @@ Import: github.com/knative/eventing/vendor/cloud.google.com/go +=========================================================== 
+Import: github.com/knative/eventing/vendor/github.com/Shopify/sarama + +Copyright (c) 2013 Shopify + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + + =========================================================== Import: github.com/knative/eventing/vendor/github.com/beorn7/perks @@ -254,6 +280,87 @@ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+=========================================================== +Import: github.com/knative/eventing/vendor/github.com/eapache/go-resiliency + +The MIT License (MIT) + +Copyright (c) 2014 Evan Huus + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ + + + +=========================================================== +Import: github.com/knative/eventing/vendor/github.com/eapache/go-xerial-snappy + +The MIT License (MIT) + +Copyright (c) 2016 Evan Huus + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ + + +=========================================================== +Import: github.com/knative/eventing/vendor/github.com/eapache/queue + +The MIT License (MIT) + +Copyright (c) 2014 Evan Huus + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + =========================================================== Import: github.com/knative/eventing/vendor/github.com/evanphx/json-patch @@ -1276,6 +1383,39 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +=========================================================== +Import: github.com/knative/eventing/vendor/github.com/golang/snappy + +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + =========================================================== Import: github.com/knative/eventing/vendor/github.com/google/btree @@ -3544,6 +3684,40 @@ THE SOFTWARE. +=========================================================== +Import: github.com/knative/eventing/vendor/github.com/pierrec/lz4 + +Copyright (c) 2015, Pierre Curto +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of xxHash nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + + =========================================================== Import: github.com/knative/eventing/vendor/github.com/prometheus/client_golang @@ -4372,6 +4546,41 @@ Import: github.com/knative/eventing/vendor/github.com/prometheus/procfs +=========================================================== +Import: github.com/knative/eventing/vendor/github.com/rcrowley/go-metrics + +Copyright 2012 Richard Crowley. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. 
Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + +THIS SOFTWARE IS PROVIDED BY RICHARD CROWLEY ``AS IS'' AND ANY EXPRESS +OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL RICHARD CROWLEY OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +THE POSSIBILITY OF SUCH DAMAGE. + +The views and conclusions contained in the software and documentation +are those of the authors and should not be interpreted as representing +official policies, either expressed or implied, of Richard Crowley. 
+ + + =========================================================== Import: github.com/knative/eventing/vendor/github.com/spf13/pflag diff --git a/vendor/github.com/Shopify/sarama/LICENSE b/vendor/github.com/Shopify/sarama/LICENSE new file mode 100644 index 00000000000..d2bf4352f4c --- /dev/null +++ b/vendor/github.com/Shopify/sarama/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2013 Shopify + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/Shopify/sarama/acl_bindings.go b/vendor/github.com/Shopify/sarama/acl_bindings.go new file mode 100644 index 00000000000..51517359abc --- /dev/null +++ b/vendor/github.com/Shopify/sarama/acl_bindings.go @@ -0,0 +1,119 @@ +package sarama + +type Resource struct { + ResourceType AclResourceType + ResourceName string +} + +func (r *Resource) encode(pe packetEncoder) error { + pe.putInt8(int8(r.ResourceType)) + + if err := pe.putString(r.ResourceName); err != nil { + return err + } + + return nil +} + +func (r *Resource) decode(pd packetDecoder, version int16) (err error) { + resourceType, err := pd.getInt8() + if err != nil { + return err + } + r.ResourceType = AclResourceType(resourceType) + + if r.ResourceName, err = pd.getString(); err != nil { + return err + } + + return nil +} + +type Acl struct { + Principal string + Host string + Operation AclOperation + PermissionType AclPermissionType +} + +func (a *Acl) encode(pe packetEncoder) error { + if err := pe.putString(a.Principal); err != nil { + return err + } + + if err := pe.putString(a.Host); err != nil { + return err + } + + pe.putInt8(int8(a.Operation)) + pe.putInt8(int8(a.PermissionType)) + + return nil +} + +func (a *Acl) decode(pd packetDecoder, version int16) (err error) { + if a.Principal, err = pd.getString(); err != nil { + return err + } + + if a.Host, err = pd.getString(); err != nil { + return err + } + + operation, err := pd.getInt8() + if err != nil { + return err + } + a.Operation = AclOperation(operation) + + permissionType, err := pd.getInt8() + if err != nil { + return err + } + a.PermissionType = AclPermissionType(permissionType) + + return nil +} + +type ResourceAcls struct { + Resource + Acls []*Acl +} + +func (r *ResourceAcls) encode(pe packetEncoder) error { + if err := r.Resource.encode(pe); err != nil { + return err + } + + if err := pe.putArrayLength(len(r.Acls)); err != nil { + return err + } + for _, acl := range r.Acls { + if err := acl.encode(pe); err 
!= nil { + return err + } + } + + return nil +} + +func (r *ResourceAcls) decode(pd packetDecoder, version int16) error { + if err := r.Resource.decode(pd, version); err != nil { + return err + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Acls = make([]*Acl, n) + for i := 0; i < n; i++ { + r.Acls[i] = new(Acl) + if err := r.Acls[i].decode(pd, version); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/acl_create_request.go b/vendor/github.com/Shopify/sarama/acl_create_request.go new file mode 100644 index 00000000000..0b6ecbec3e1 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/acl_create_request.go @@ -0,0 +1,76 @@ +package sarama + +type CreateAclsRequest struct { + AclCreations []*AclCreation +} + +func (c *CreateAclsRequest) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(c.AclCreations)); err != nil { + return err + } + + for _, aclCreation := range c.AclCreations { + if err := aclCreation.encode(pe); err != nil { + return err + } + } + + return nil +} + +func (c *CreateAclsRequest) decode(pd packetDecoder, version int16) (err error) { + n, err := pd.getArrayLength() + if err != nil { + return err + } + + c.AclCreations = make([]*AclCreation, n) + + for i := 0; i < n; i++ { + c.AclCreations[i] = new(AclCreation) + if err := c.AclCreations[i].decode(pd, version); err != nil { + return err + } + } + + return nil +} + +func (d *CreateAclsRequest) key() int16 { + return 30 +} + +func (d *CreateAclsRequest) version() int16 { + return 0 +} + +func (d *CreateAclsRequest) requiredVersion() KafkaVersion { + return V0_11_0_0 +} + +type AclCreation struct { + Resource + Acl +} + +func (a *AclCreation) encode(pe packetEncoder) error { + if err := a.Resource.encode(pe); err != nil { + return err + } + if err := a.Acl.encode(pe); err != nil { + return err + } + + return nil +} + +func (a *AclCreation) decode(pd packetDecoder, version int16) (err error) { + if err := 
a.Resource.decode(pd, version); err != nil { + return err + } + if err := a.Acl.decode(pd, version); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/acl_create_response.go b/vendor/github.com/Shopify/sarama/acl_create_response.go new file mode 100644 index 00000000000..8a56f357354 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/acl_create_response.go @@ -0,0 +1,88 @@ +package sarama + +import "time" + +type CreateAclsResponse struct { + ThrottleTime time.Duration + AclCreationResponses []*AclCreationResponse +} + +func (c *CreateAclsResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(c.ThrottleTime / time.Millisecond)) + + if err := pe.putArrayLength(len(c.AclCreationResponses)); err != nil { + return err + } + + for _, aclCreationResponse := range c.AclCreationResponses { + if err := aclCreationResponse.encode(pe); err != nil { + return err + } + } + + return nil +} + +func (c *CreateAclsResponse) decode(pd packetDecoder, version int16) (err error) { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + c.AclCreationResponses = make([]*AclCreationResponse, n) + for i := 0; i < n; i++ { + c.AclCreationResponses[i] = new(AclCreationResponse) + if err := c.AclCreationResponses[i].decode(pd, version); err != nil { + return err + } + } + + return nil +} + +func (d *CreateAclsResponse) key() int16 { + return 30 +} + +func (d *CreateAclsResponse) version() int16 { + return 0 +} + +func (d *CreateAclsResponse) requiredVersion() KafkaVersion { + return V0_11_0_0 +} + +type AclCreationResponse struct { + Err KError + ErrMsg *string +} + +func (a *AclCreationResponse) encode(pe packetEncoder) error { + pe.putInt16(int16(a.Err)) + + if err := pe.putNullableString(a.ErrMsg); err != nil { + return err + } + + return nil +} + +func (a *AclCreationResponse) 
decode(pd packetDecoder, version int16) (err error) { + kerr, err := pd.getInt16() + if err != nil { + return err + } + a.Err = KError(kerr) + + if a.ErrMsg, err = pd.getNullableString(); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/acl_delete_request.go b/vendor/github.com/Shopify/sarama/acl_delete_request.go new file mode 100644 index 00000000000..4133dceab71 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/acl_delete_request.go @@ -0,0 +1,48 @@ +package sarama + +type DeleteAclsRequest struct { + Filters []*AclFilter +} + +func (d *DeleteAclsRequest) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(d.Filters)); err != nil { + return err + } + + for _, filter := range d.Filters { + if err := filter.encode(pe); err != nil { + return err + } + } + + return nil +} + +func (d *DeleteAclsRequest) decode(pd packetDecoder, version int16) (err error) { + n, err := pd.getArrayLength() + if err != nil { + return err + } + + d.Filters = make([]*AclFilter, n) + for i := 0; i < n; i++ { + d.Filters[i] = new(AclFilter) + if err := d.Filters[i].decode(pd, version); err != nil { + return err + } + } + + return nil +} + +func (d *DeleteAclsRequest) key() int16 { + return 31 +} + +func (d *DeleteAclsRequest) version() int16 { + return 0 +} + +func (d *DeleteAclsRequest) requiredVersion() KafkaVersion { + return V0_11_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/acl_delete_response.go b/vendor/github.com/Shopify/sarama/acl_delete_response.go new file mode 100644 index 00000000000..b5e1c45eb5d --- /dev/null +++ b/vendor/github.com/Shopify/sarama/acl_delete_response.go @@ -0,0 +1,155 @@ +package sarama + +import "time" + +type DeleteAclsResponse struct { + ThrottleTime time.Duration + FilterResponses []*FilterResponse +} + +func (a *DeleteAclsResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(a.ThrottleTime / time.Millisecond)) + + if err := pe.putArrayLength(len(a.FilterResponses)); err != nil 
{ + return err + } + + for _, filterResponse := range a.FilterResponses { + if err := filterResponse.encode(pe); err != nil { + return err + } + } + + return nil +} + +func (a *DeleteAclsResponse) decode(pd packetDecoder, version int16) (err error) { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + n, err := pd.getArrayLength() + if err != nil { + return err + } + a.FilterResponses = make([]*FilterResponse, n) + + for i := 0; i < n; i++ { + a.FilterResponses[i] = new(FilterResponse) + if err := a.FilterResponses[i].decode(pd, version); err != nil { + return err + } + } + + return nil +} + +func (d *DeleteAclsResponse) key() int16 { + return 31 +} + +func (d *DeleteAclsResponse) version() int16 { + return 0 +} + +func (d *DeleteAclsResponse) requiredVersion() KafkaVersion { + return V0_11_0_0 +} + +type FilterResponse struct { + Err KError + ErrMsg *string + MatchingAcls []*MatchingAcl +} + +func (f *FilterResponse) encode(pe packetEncoder) error { + pe.putInt16(int16(f.Err)) + if err := pe.putNullableString(f.ErrMsg); err != nil { + return err + } + + if err := pe.putArrayLength(len(f.MatchingAcls)); err != nil { + return err + } + for _, matchingAcl := range f.MatchingAcls { + if err := matchingAcl.encode(pe); err != nil { + return err + } + } + + return nil +} + +func (f *FilterResponse) decode(pd packetDecoder, version int16) (err error) { + kerr, err := pd.getInt16() + if err != nil { + return err + } + f.Err = KError(kerr) + + if f.ErrMsg, err = pd.getNullableString(); err != nil { + return err + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + f.MatchingAcls = make([]*MatchingAcl, n) + for i := 0; i < n; i++ { + f.MatchingAcls[i] = new(MatchingAcl) + if err := f.MatchingAcls[i].decode(pd, version); err != nil { + return err + } + } + + return nil +} + +type MatchingAcl struct { + Err KError + ErrMsg *string + Resource + Acl +} + +func (m 
*MatchingAcl) encode(pe packetEncoder) error { + pe.putInt16(int16(m.Err)) + if err := pe.putNullableString(m.ErrMsg); err != nil { + return err + } + + if err := m.Resource.encode(pe); err != nil { + return err + } + + if err := m.Acl.encode(pe); err != nil { + return err + } + + return nil +} + +func (m *MatchingAcl) decode(pd packetDecoder, version int16) (err error) { + kerr, err := pd.getInt16() + if err != nil { + return err + } + m.Err = KError(kerr) + + if m.ErrMsg, err = pd.getNullableString(); err != nil { + return err + } + + if err := m.Resource.decode(pd, version); err != nil { + return err + } + + if err := m.Acl.decode(pd, version); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/acl_describe_request.go b/vendor/github.com/Shopify/sarama/acl_describe_request.go new file mode 100644 index 00000000000..02a5a1f0e22 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/acl_describe_request.go @@ -0,0 +1,25 @@ +package sarama + +type DescribeAclsRequest struct { + AclFilter +} + +func (d *DescribeAclsRequest) encode(pe packetEncoder) error { + return d.AclFilter.encode(pe) +} + +func (d *DescribeAclsRequest) decode(pd packetDecoder, version int16) (err error) { + return d.AclFilter.decode(pd, version) +} + +func (d *DescribeAclsRequest) key() int16 { + return 29 +} + +func (d *DescribeAclsRequest) version() int16 { + return 0 +} + +func (d *DescribeAclsRequest) requiredVersion() KafkaVersion { + return V0_11_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/acl_describe_response.go b/vendor/github.com/Shopify/sarama/acl_describe_response.go new file mode 100644 index 00000000000..5bc9497f4c5 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/acl_describe_response.go @@ -0,0 +1,80 @@ +package sarama + +import "time" + +type DescribeAclsResponse struct { + ThrottleTime time.Duration + Err KError + ErrMsg *string + ResourceAcls []*ResourceAcls +} + +func (d *DescribeAclsResponse) encode(pe packetEncoder) 
error { + pe.putInt32(int32(d.ThrottleTime / time.Millisecond)) + pe.putInt16(int16(d.Err)) + + if err := pe.putNullableString(d.ErrMsg); err != nil { + return err + } + + if err := pe.putArrayLength(len(d.ResourceAcls)); err != nil { + return err + } + + for _, resourceAcl := range d.ResourceAcls { + if err := resourceAcl.encode(pe); err != nil { + return err + } + } + + return nil +} + +func (d *DescribeAclsResponse) decode(pd packetDecoder, version int16) (err error) { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + kerr, err := pd.getInt16() + if err != nil { + return err + } + d.Err = KError(kerr) + + errmsg, err := pd.getString() + if err != nil { + return err + } + if errmsg != "" { + d.ErrMsg = &errmsg + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + d.ResourceAcls = make([]*ResourceAcls, n) + + for i := 0; i < n; i++ { + d.ResourceAcls[i] = new(ResourceAcls) + if err := d.ResourceAcls[i].decode(pd, version); err != nil { + return err + } + } + + return nil +} + +func (d *DescribeAclsResponse) key() int16 { + return 29 +} + +func (d *DescribeAclsResponse) version() int16 { + return 0 +} + +func (d *DescribeAclsResponse) requiredVersion() KafkaVersion { + return V0_11_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/acl_filter.go b/vendor/github.com/Shopify/sarama/acl_filter.go new file mode 100644 index 00000000000..97063542198 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/acl_filter.go @@ -0,0 +1,61 @@ +package sarama + +type AclFilter struct { + ResourceType AclResourceType + ResourceName *string + Principal *string + Host *string + Operation AclOperation + PermissionType AclPermissionType +} + +func (a *AclFilter) encode(pe packetEncoder) error { + pe.putInt8(int8(a.ResourceType)) + if err := pe.putNullableString(a.ResourceName); err != nil { + return err + } + if err := pe.putNullableString(a.Principal); err != nil { + 
return err + } + if err := pe.putNullableString(a.Host); err != nil { + return err + } + pe.putInt8(int8(a.Operation)) + pe.putInt8(int8(a.PermissionType)) + + return nil +} + +func (a *AclFilter) decode(pd packetDecoder, version int16) (err error) { + resourceType, err := pd.getInt8() + if err != nil { + return err + } + a.ResourceType = AclResourceType(resourceType) + + if a.ResourceName, err = pd.getNullableString(); err != nil { + return err + } + + if a.Principal, err = pd.getNullableString(); err != nil { + return err + } + + if a.Host, err = pd.getNullableString(); err != nil { + return err + } + + operation, err := pd.getInt8() + if err != nil { + return err + } + a.Operation = AclOperation(operation) + + permissionType, err := pd.getInt8() + if err != nil { + return err + } + a.PermissionType = AclPermissionType(permissionType) + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/acl_types.go b/vendor/github.com/Shopify/sarama/acl_types.go new file mode 100644 index 00000000000..19da6f2f451 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/acl_types.go @@ -0,0 +1,42 @@ +package sarama + +type AclOperation int + +// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/acl/AclOperation.java +const ( + AclOperationUnknown AclOperation = 0 + AclOperationAny AclOperation = 1 + AclOperationAll AclOperation = 2 + AclOperationRead AclOperation = 3 + AclOperationWrite AclOperation = 4 + AclOperationCreate AclOperation = 5 + AclOperationDelete AclOperation = 6 + AclOperationAlter AclOperation = 7 + AclOperationDescribe AclOperation = 8 + AclOperationClusterAction AclOperation = 9 + AclOperationDescribeConfigs AclOperation = 10 + AclOperationAlterConfigs AclOperation = 11 + AclOperationIdempotentWrite AclOperation = 12 +) + +type AclPermissionType int + +// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/acl/AclPermissionType.java +const ( + AclPermissionUnknown 
AclPermissionType = 0 + AclPermissionAny AclPermissionType = 1 + AclPermissionDeny AclPermissionType = 2 + AclPermissionAllow AclPermissionType = 3 +) + +type AclResourceType int + +// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/resource/ResourceType.java +const ( + AclResourceUnknown AclResourceType = 0 + AclResourceAny AclResourceType = 1 + AclResourceTopic AclResourceType = 2 + AclResourceGroup AclResourceType = 3 + AclResourceCluster AclResourceType = 4 + AclResourceTransactionalID AclResourceType = 5 +) diff --git a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go new file mode 100644 index 00000000000..6da166c634b --- /dev/null +++ b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go @@ -0,0 +1,52 @@ +package sarama + +type AddOffsetsToTxnRequest struct { + TransactionalID string + ProducerID int64 + ProducerEpoch int16 + GroupID string +} + +func (a *AddOffsetsToTxnRequest) encode(pe packetEncoder) error { + if err := pe.putString(a.TransactionalID); err != nil { + return err + } + + pe.putInt64(a.ProducerID) + + pe.putInt16(a.ProducerEpoch) + + if err := pe.putString(a.GroupID); err != nil { + return err + } + + return nil +} + +func (a *AddOffsetsToTxnRequest) decode(pd packetDecoder, version int16) (err error) { + if a.TransactionalID, err = pd.getString(); err != nil { + return err + } + if a.ProducerID, err = pd.getInt64(); err != nil { + return err + } + if a.ProducerEpoch, err = pd.getInt16(); err != nil { + return err + } + if a.GroupID, err = pd.getString(); err != nil { + return err + } + return nil +} + +func (a *AddOffsetsToTxnRequest) key() int16 { + return 25 +} + +func (a *AddOffsetsToTxnRequest) version() int16 { + return 0 +} + +func (a *AddOffsetsToTxnRequest) requiredVersion() KafkaVersion { + return V0_11_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go 
b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go new file mode 100644 index 00000000000..3a46151a050 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go @@ -0,0 +1,44 @@ +package sarama + +import ( + "time" +) + +type AddOffsetsToTxnResponse struct { + ThrottleTime time.Duration + Err KError +} + +func (a *AddOffsetsToTxnResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(a.ThrottleTime / time.Millisecond)) + pe.putInt16(int16(a.Err)) + return nil +} + +func (a *AddOffsetsToTxnResponse) decode(pd packetDecoder, version int16) (err error) { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + kerr, err := pd.getInt16() + if err != nil { + return err + } + a.Err = KError(kerr) + + return nil +} + +func (a *AddOffsetsToTxnResponse) key() int16 { + return 25 +} + +func (a *AddOffsetsToTxnResponse) version() int16 { + return 0 +} + +func (a *AddOffsetsToTxnResponse) requiredVersion() KafkaVersion { + return V0_11_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go new file mode 100644 index 00000000000..a8a59225e4d --- /dev/null +++ b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go @@ -0,0 +1,76 @@ +package sarama + +type AddPartitionsToTxnRequest struct { + TransactionalID string + ProducerID int64 + ProducerEpoch int16 + TopicPartitions map[string][]int32 +} + +func (a *AddPartitionsToTxnRequest) encode(pe packetEncoder) error { + if err := pe.putString(a.TransactionalID); err != nil { + return err + } + pe.putInt64(a.ProducerID) + pe.putInt16(a.ProducerEpoch) + + if err := pe.putArrayLength(len(a.TopicPartitions)); err != nil { + return err + } + for topic, partitions := range a.TopicPartitions { + if err := pe.putString(topic); err != nil { + return err + } + if err := pe.putInt32Array(partitions); err 
!= nil { + return err + } + } + + return nil +} + +func (a *AddPartitionsToTxnRequest) decode(pd packetDecoder, version int16) (err error) { + if a.TransactionalID, err = pd.getString(); err != nil { + return err + } + if a.ProducerID, err = pd.getInt64(); err != nil { + return err + } + if a.ProducerEpoch, err = pd.getInt16(); err != nil { + return err + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + a.TopicPartitions = make(map[string][]int32) + for i := 0; i < n; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + + partitions, err := pd.getInt32Array() + if err != nil { + return err + } + + a.TopicPartitions[topic] = partitions + } + + return nil +} + +func (a *AddPartitionsToTxnRequest) key() int16 { + return 24 +} + +func (a *AddPartitionsToTxnRequest) version() int16 { + return 0 +} + +func (a *AddPartitionsToTxnRequest) requiredVersion() KafkaVersion { + return V0_11_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go new file mode 100644 index 00000000000..581c556c5ce --- /dev/null +++ b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go @@ -0,0 +1,108 @@ +package sarama + +import ( + "time" +) + +type AddPartitionsToTxnResponse struct { + ThrottleTime time.Duration + Errors map[string][]*PartitionError +} + +func (a *AddPartitionsToTxnResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(a.ThrottleTime / time.Millisecond)) + if err := pe.putArrayLength(len(a.Errors)); err != nil { + return err + } + + for topic, e := range a.Errors { + if err := pe.putString(topic); err != nil { + return err + } + if err := pe.putArrayLength(len(e)); err != nil { + return err + } + for _, partitionError := range e { + if err := partitionError.encode(pe); err != nil { + return err + } + } + } + + return nil +} + +func (a *AddPartitionsToTxnResponse) decode(pd packetDecoder, version int16) (err 
error) { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + a.Errors = make(map[string][]*PartitionError) + + for i := 0; i < n; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + + m, err := pd.getArrayLength() + if err != nil { + return err + } + + a.Errors[topic] = make([]*PartitionError, m) + + for j := 0; j < m; j++ { + a.Errors[topic][j] = new(PartitionError) + if err := a.Errors[topic][j].decode(pd, version); err != nil { + return err + } + } + } + + return nil +} + +func (a *AddPartitionsToTxnResponse) key() int16 { + return 24 +} + +func (a *AddPartitionsToTxnResponse) version() int16 { + return 0 +} + +func (a *AddPartitionsToTxnResponse) requiredVersion() KafkaVersion { + return V0_11_0_0 +} + +type PartitionError struct { + Partition int32 + Err KError +} + +func (p *PartitionError) encode(pe packetEncoder) error { + pe.putInt32(p.Partition) + pe.putInt16(int16(p.Err)) + return nil +} + +func (p *PartitionError) decode(pd packetDecoder, version int16) (err error) { + if p.Partition, err = pd.getInt32(); err != nil { + return err + } + + kerr, err := pd.getInt16() + if err != nil { + return err + } + p.Err = KError(kerr) + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/alter_configs_request.go b/vendor/github.com/Shopify/sarama/alter_configs_request.go new file mode 100644 index 00000000000..48c44ead67a --- /dev/null +++ b/vendor/github.com/Shopify/sarama/alter_configs_request.go @@ -0,0 +1,120 @@ +package sarama + +type AlterConfigsRequest struct { + Resources []*AlterConfigsResource + ValidateOnly bool +} + +type AlterConfigsResource struct { + Type ConfigResourceType + Name string + ConfigEntries map[string]*string +} + +func (acr *AlterConfigsRequest) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(acr.Resources)); err != nil { + return 
err + } + + for _, r := range acr.Resources { + if err := r.encode(pe); err != nil { + return err + } + } + + pe.putBool(acr.ValidateOnly) + return nil +} + +func (acr *AlterConfigsRequest) decode(pd packetDecoder, version int16) error { + resourceCount, err := pd.getArrayLength() + if err != nil { + return err + } + + acr.Resources = make([]*AlterConfigsResource, resourceCount) + for i := range acr.Resources { + r := &AlterConfigsResource{} + err = r.decode(pd, version) + if err != nil { + return err + } + acr.Resources[i] = r + } + + validateOnly, err := pd.getBool() + if err != nil { + return err + } + + acr.ValidateOnly = validateOnly + + return nil +} + +func (ac *AlterConfigsResource) encode(pe packetEncoder) error { + pe.putInt8(int8(ac.Type)) + + if err := pe.putString(ac.Name); err != nil { + return err + } + + if err := pe.putArrayLength(len(ac.ConfigEntries)); err != nil { + return err + } + for configKey, configValue := range ac.ConfigEntries { + if err := pe.putString(configKey); err != nil { + return err + } + if err := pe.putNullableString(configValue); err != nil { + return err + } + } + + return nil +} + +func (ac *AlterConfigsResource) decode(pd packetDecoder, version int16) error { + t, err := pd.getInt8() + if err != nil { + return err + } + ac.Type = ConfigResourceType(t) + + name, err := pd.getString() + if err != nil { + return err + } + ac.Name = name + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + if n > 0 { + ac.ConfigEntries = make(map[string]*string, n) + for i := 0; i < n; i++ { + configKey, err := pd.getString() + if err != nil { + return err + } + if ac.ConfigEntries[configKey], err = pd.getNullableString(); err != nil { + return err + } + } + } + return err +} + +func (acr *AlterConfigsRequest) key() int16 { + return 33 +} + +func (acr *AlterConfigsRequest) version() int16 { + return 0 +} + +func (acr *AlterConfigsRequest) requiredVersion() KafkaVersion { + return V0_11_0_0 +} diff --git 
a/vendor/github.com/Shopify/sarama/alter_configs_response.go b/vendor/github.com/Shopify/sarama/alter_configs_response.go new file mode 100644 index 00000000000..29b09e1ff84 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/alter_configs_response.go @@ -0,0 +1,95 @@ +package sarama + +import "time" + +type AlterConfigsResponse struct { + ThrottleTime time.Duration + Resources []*AlterConfigsResourceResponse +} + +type AlterConfigsResourceResponse struct { + ErrorCode int16 + ErrorMsg string + Type ConfigResourceType + Name string +} + +func (ct *AlterConfigsResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(ct.ThrottleTime / time.Millisecond)) + + if err := pe.putArrayLength(len(ct.Resources)); err != nil { + return err + } + + for i := range ct.Resources { + pe.putInt16(ct.Resources[i].ErrorCode) + err := pe.putString(ct.Resources[i].ErrorMsg) + if err != nil { + return nil + } + pe.putInt8(int8(ct.Resources[i].Type)) + err = pe.putString(ct.Resources[i].Name) + if err != nil { + return nil + } + } + + return nil +} + +func (acr *AlterConfigsResponse) decode(pd packetDecoder, version int16) error { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + acr.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + responseCount, err := pd.getArrayLength() + if err != nil { + return err + } + + acr.Resources = make([]*AlterConfigsResourceResponse, responseCount) + + for i := range acr.Resources { + acr.Resources[i] = new(AlterConfigsResourceResponse) + + errCode, err := pd.getInt16() + if err != nil { + return err + } + acr.Resources[i].ErrorCode = errCode + + e, err := pd.getString() + if err != nil { + return err + } + acr.Resources[i].ErrorMsg = e + + t, err := pd.getInt8() + if err != nil { + return err + } + acr.Resources[i].Type = ConfigResourceType(t) + + name, err := pd.getString() + if err != nil { + return err + } + acr.Resources[i].Name = name + } + + return nil +} + +func (r *AlterConfigsResponse) key() int16 { + 
return 32 +} + +func (r *AlterConfigsResponse) version() int16 { + return 0 +} + +func (r *AlterConfigsResponse) requiredVersion() KafkaVersion { + return V0_11_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/api_versions_request.go b/vendor/github.com/Shopify/sarama/api_versions_request.go new file mode 100644 index 00000000000..ab65f01ccff --- /dev/null +++ b/vendor/github.com/Shopify/sarama/api_versions_request.go @@ -0,0 +1,24 @@ +package sarama + +type ApiVersionsRequest struct { +} + +func (r *ApiVersionsRequest) encode(pe packetEncoder) error { + return nil +} + +func (r *ApiVersionsRequest) decode(pd packetDecoder, version int16) (err error) { + return nil +} + +func (r *ApiVersionsRequest) key() int16 { + return 18 +} + +func (r *ApiVersionsRequest) version() int16 { + return 0 +} + +func (r *ApiVersionsRequest) requiredVersion() KafkaVersion { + return V0_10_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/api_versions_response.go b/vendor/github.com/Shopify/sarama/api_versions_response.go new file mode 100644 index 00000000000..23bc326e15f --- /dev/null +++ b/vendor/github.com/Shopify/sarama/api_versions_response.go @@ -0,0 +1,87 @@ +package sarama + +type ApiVersionsResponseBlock struct { + ApiKey int16 + MinVersion int16 + MaxVersion int16 +} + +func (b *ApiVersionsResponseBlock) encode(pe packetEncoder) error { + pe.putInt16(b.ApiKey) + pe.putInt16(b.MinVersion) + pe.putInt16(b.MaxVersion) + return nil +} + +func (b *ApiVersionsResponseBlock) decode(pd packetDecoder) error { + var err error + + if b.ApiKey, err = pd.getInt16(); err != nil { + return err + } + + if b.MinVersion, err = pd.getInt16(); err != nil { + return err + } + + if b.MaxVersion, err = pd.getInt16(); err != nil { + return err + } + + return nil +} + +type ApiVersionsResponse struct { + Err KError + ApiVersions []*ApiVersionsResponseBlock +} + +func (r *ApiVersionsResponse) encode(pe packetEncoder) error { + pe.putInt16(int16(r.Err)) + if err := 
pe.putArrayLength(len(r.ApiVersions)); err != nil { + return err + } + for _, apiVersion := range r.ApiVersions { + if err := apiVersion.encode(pe); err != nil { + return err + } + } + return nil +} + +func (r *ApiVersionsResponse) decode(pd packetDecoder, version int16) error { + kerr, err := pd.getInt16() + if err != nil { + return err + } + + r.Err = KError(kerr) + + numBlocks, err := pd.getArrayLength() + if err != nil { + return err + } + + r.ApiVersions = make([]*ApiVersionsResponseBlock, numBlocks) + for i := 0; i < numBlocks; i++ { + block := new(ApiVersionsResponseBlock) + if err := block.decode(pd); err != nil { + return err + } + r.ApiVersions[i] = block + } + + return nil +} + +func (r *ApiVersionsResponse) key() int16 { + return 18 +} + +func (r *ApiVersionsResponse) version() int16 { + return 0 +} + +func (r *ApiVersionsResponse) requiredVersion() KafkaVersion { + return V0_10_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/async_producer.go b/vendor/github.com/Shopify/sarama/async_producer.go new file mode 100644 index 00000000000..89722554092 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/async_producer.go @@ -0,0 +1,932 @@ +package sarama + +import ( + "encoding/binary" + "fmt" + "sync" + "time" + + "github.com/eapache/go-resiliency/breaker" + "github.com/eapache/queue" +) + +// AsyncProducer publishes Kafka messages using a non-blocking API. It routes messages +// to the correct broker for the provided topic-partition, refreshing metadata as appropriate, +// and parses responses for errors. You must read from the Errors() channel or the +// producer will deadlock. You must call Close() or AsyncClose() on a producer to avoid +// leaks: it will not be garbage-collected automatically when it passes out of +// scope. +type AsyncProducer interface { + + // AsyncClose triggers a shutdown of the producer. The shutdown has completed + // when both the Errors and Successes channels have been closed. 
When calling + // AsyncClose, you *must* continue to read from those channels in order to + // drain the results of any messages in flight. + AsyncClose() + + // Close shuts down the producer and waits for any buffered messages to be + // flushed. You must call this function before a producer object passes out of + // scope, as it may otherwise leak memory. You must call this before calling + // Close on the underlying client. + Close() error + + // Input is the input channel for the user to write messages to that they + // wish to send. + Input() chan<- *ProducerMessage + + // Successes is the success output channel back to the user when Return.Successes is + // enabled. If Return.Successes is true, you MUST read from this channel or the + // Producer will deadlock. It is suggested that you send and read messages + // together in a single select statement. + Successes() <-chan *ProducerMessage + + // Errors is the error output channel back to the user. You MUST read from this + // channel or the Producer will deadlock when the channel is full. Alternatively, + // you can set Producer.Return.Errors in your config to false, which prevents + // errors to be returned. + Errors() <-chan *ProducerError +} + +type asyncProducer struct { + client Client + conf *Config + ownClient bool + + errors chan *ProducerError + input, successes, retries chan *ProducerMessage + inFlight sync.WaitGroup + + brokers map[*Broker]chan<- *ProducerMessage + brokerRefs map[chan<- *ProducerMessage]int + brokerLock sync.Mutex +} + +// NewAsyncProducer creates a new AsyncProducer using the given broker addresses and configuration. 
+func NewAsyncProducer(addrs []string, conf *Config) (AsyncProducer, error) { + client, err := NewClient(addrs, conf) + if err != nil { + return nil, err + } + + p, err := NewAsyncProducerFromClient(client) + if err != nil { + return nil, err + } + p.(*asyncProducer).ownClient = true + return p, nil +} + +// NewAsyncProducerFromClient creates a new Producer using the given client. It is still +// necessary to call Close() on the underlying client when shutting down this producer. +func NewAsyncProducerFromClient(client Client) (AsyncProducer, error) { + // Check that we are not dealing with a closed Client before processing any other arguments + if client.Closed() { + return nil, ErrClosedClient + } + + p := &asyncProducer{ + client: client, + conf: client.Config(), + errors: make(chan *ProducerError), + input: make(chan *ProducerMessage), + successes: make(chan *ProducerMessage), + retries: make(chan *ProducerMessage), + brokers: make(map[*Broker]chan<- *ProducerMessage), + brokerRefs: make(map[chan<- *ProducerMessage]int), + } + + // launch our singleton dispatchers + go withRecover(p.dispatcher) + go withRecover(p.retryHandler) + + return p, nil +} + +type flagSet int8 + +const ( + syn flagSet = 1 << iota // first message from partitionProducer to brokerProducer + fin // final message from partitionProducer to brokerProducer and back + shutdown // start the shutdown process +) + +// ProducerMessage is the collection of elements passed to the Producer in order to send a message. +type ProducerMessage struct { + Topic string // The Kafka topic for this message. + // The partitioning key for this message. Pre-existing Encoders include + // StringEncoder and ByteEncoder. + Key Encoder + // The actual message to store in Kafka. Pre-existing Encoders include + // StringEncoder and ByteEncoder. + Value Encoder + + // The headers are key-value pairs that are transparently passed + // by Kafka between producers and consumers. 
+ Headers []RecordHeader + + // This field is used to hold arbitrary data you wish to include so it + // will be available when receiving on the Successes and Errors channels. + // Sarama completely ignores this field and is only to be used for + // pass-through data. + Metadata interface{} + + // Below this point are filled in by the producer as the message is processed + + // Offset is the offset of the message stored on the broker. This is only + // guaranteed to be defined if the message was successfully delivered and + // RequiredAcks is not NoResponse. + Offset int64 + // Partition is the partition that the message was sent to. This is only + // guaranteed to be defined if the message was successfully delivered. + Partition int32 + // Timestamp is the timestamp assigned to the message by the broker. This + // is only guaranteed to be defined if the message was successfully + // delivered, RequiredAcks is not NoResponse, and the Kafka broker is at + // least version 0.10.0. + Timestamp time.Time + + retries int + flags flagSet + expectation chan *ProducerError +} + +const producerMessageOverhead = 26 // the metadata overhead of CRC, flags, etc. + +func (m *ProducerMessage) byteSize(version int) int { + var size int + if version >= 2 { + size = maximumRecordOverhead + for _, h := range m.Headers { + size += len(h.Key) + len(h.Value) + 2*binary.MaxVarintLen32 + } + } else { + size = producerMessageOverhead + } + if m.Key != nil { + size += m.Key.Length() + } + if m.Value != nil { + size += m.Value.Length() + } + return size +} + +func (m *ProducerMessage) clear() { + m.flags = 0 + m.retries = 0 +} + +// ProducerError is the type of error generated when the producer fails to deliver a message. +// It contains the original ProducerMessage as well as the actual error value. 
+type ProducerError struct { + Msg *ProducerMessage + Err error +} + +func (pe ProducerError) Error() string { + return fmt.Sprintf("kafka: Failed to produce message to topic %s: %s", pe.Msg.Topic, pe.Err) +} + +// ProducerErrors is a type that wraps a batch of "ProducerError"s and implements the Error interface. +// It can be returned from the Producer's Close method to avoid the need to manually drain the Errors channel +// when closing a producer. +type ProducerErrors []*ProducerError + +func (pe ProducerErrors) Error() string { + return fmt.Sprintf("kafka: Failed to deliver %d messages.", len(pe)) +} + +func (p *asyncProducer) Errors() <-chan *ProducerError { + return p.errors +} + +func (p *asyncProducer) Successes() <-chan *ProducerMessage { + return p.successes +} + +func (p *asyncProducer) Input() chan<- *ProducerMessage { + return p.input +} + +func (p *asyncProducer) Close() error { + p.AsyncClose() + + if p.conf.Producer.Return.Successes { + go withRecover(func() { + for range p.successes { + } + }) + } + + var errors ProducerErrors + if p.conf.Producer.Return.Errors { + for event := range p.errors { + errors = append(errors, event) + } + } else { + <-p.errors + } + + if len(errors) > 0 { + return errors + } + return nil +} + +func (p *asyncProducer) AsyncClose() { + go withRecover(p.shutdown) +} + +// singleton +// dispatches messages by topic +func (p *asyncProducer) dispatcher() { + handlers := make(map[string]chan<- *ProducerMessage) + shuttingDown := false + + for msg := range p.input { + if msg == nil { + Logger.Println("Something tried to send a nil message, it was ignored.") + continue + } + + if msg.flags&shutdown != 0 { + shuttingDown = true + p.inFlight.Done() + continue + } else if msg.retries == 0 { + if shuttingDown { + // we can't just call returnError here because that decrements the wait group, + // which hasn't been incremented yet for this message, and shouldn't be + pErr := &ProducerError{Msg: msg, Err: ErrShuttingDown} + if 
p.conf.Producer.Return.Errors { + p.errors <- pErr + } else { + Logger.Println(pErr) + } + continue + } + p.inFlight.Add(1) + } + + version := 1 + if p.conf.Version.IsAtLeast(V0_11_0_0) { + version = 2 + } else if msg.Headers != nil { + p.returnError(msg, ConfigurationError("Producing headers requires Kafka at least v0.11")) + continue + } + if msg.byteSize(version) > p.conf.Producer.MaxMessageBytes { + p.returnError(msg, ErrMessageSizeTooLarge) + continue + } + + handler := handlers[msg.Topic] + if handler == nil { + handler = p.newTopicProducer(msg.Topic) + handlers[msg.Topic] = handler + } + + handler <- msg + } + + for _, handler := range handlers { + close(handler) + } +} + +// one per topic +// partitions messages, then dispatches them by partition +type topicProducer struct { + parent *asyncProducer + topic string + input <-chan *ProducerMessage + + breaker *breaker.Breaker + handlers map[int32]chan<- *ProducerMessage + partitioner Partitioner +} + +func (p *asyncProducer) newTopicProducer(topic string) chan<- *ProducerMessage { + input := make(chan *ProducerMessage, p.conf.ChannelBufferSize) + tp := &topicProducer{ + parent: p, + topic: topic, + input: input, + breaker: breaker.New(3, 1, 10*time.Second), + handlers: make(map[int32]chan<- *ProducerMessage), + partitioner: p.conf.Producer.Partitioner(topic), + } + go withRecover(tp.dispatch) + return input +} + +func (tp *topicProducer) dispatch() { + for msg := range tp.input { + if msg.retries == 0 { + if err := tp.partitionMessage(msg); err != nil { + tp.parent.returnError(msg, err) + continue + } + } + + handler := tp.handlers[msg.Partition] + if handler == nil { + handler = tp.parent.newPartitionProducer(msg.Topic, msg.Partition) + tp.handlers[msg.Partition] = handler + } + + handler <- msg + } + + for _, handler := range tp.handlers { + close(handler) + } +} + +func (tp *topicProducer) partitionMessage(msg *ProducerMessage) error { + var partitions []int32 + + err := tp.breaker.Run(func() (err error) { 
+ var requiresConsistency = false + if ep, ok := tp.partitioner.(DynamicConsistencyPartitioner); ok { + requiresConsistency = ep.MessageRequiresConsistency(msg) + } else { + requiresConsistency = tp.partitioner.RequiresConsistency() + } + + if requiresConsistency { + partitions, err = tp.parent.client.Partitions(msg.Topic) + } else { + partitions, err = tp.parent.client.WritablePartitions(msg.Topic) + } + return + }) + + if err != nil { + return err + } + + numPartitions := int32(len(partitions)) + + if numPartitions == 0 { + return ErrLeaderNotAvailable + } + + choice, err := tp.partitioner.Partition(msg, numPartitions) + + if err != nil { + return err + } else if choice < 0 || choice >= numPartitions { + return ErrInvalidPartition + } + + msg.Partition = partitions[choice] + + return nil +} + +// one per partition per topic +// dispatches messages to the appropriate broker +// also responsible for maintaining message order during retries +type partitionProducer struct { + parent *asyncProducer + topic string + partition int32 + input <-chan *ProducerMessage + + leader *Broker + breaker *breaker.Breaker + output chan<- *ProducerMessage + + // highWatermark tracks the "current" retry level, which is the only one where we actually let messages through, + // all other messages get buffered in retryState[msg.retries].buf to preserve ordering + // retryState[msg.retries].expectChaser simply tracks whether we've seen a fin message for a given level (and + // therefore whether our buffer is complete and safe to flush) + highWatermark int + retryState []partitionRetryState +} + +type partitionRetryState struct { + buf []*ProducerMessage + expectChaser bool +} + +func (p *asyncProducer) newPartitionProducer(topic string, partition int32) chan<- *ProducerMessage { + input := make(chan *ProducerMessage, p.conf.ChannelBufferSize) + pp := &partitionProducer{ + parent: p, + topic: topic, + partition: partition, + input: input, + + breaker: breaker.New(3, 1, 10*time.Second), + 
retryState: make([]partitionRetryState, p.conf.Producer.Retry.Max+1), + } + go withRecover(pp.dispatch) + return input +} + +func (pp *partitionProducer) dispatch() { + // try to prefetch the leader; if this doesn't work, we'll do a proper call to `updateLeader` + // on the first message + pp.leader, _ = pp.parent.client.Leader(pp.topic, pp.partition) + if pp.leader != nil { + pp.output = pp.parent.getBrokerProducer(pp.leader) + pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight + pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn} + } + + for msg := range pp.input { + if msg.retries > pp.highWatermark { + // a new, higher, retry level; handle it and then back off + pp.newHighWatermark(msg.retries) + time.Sleep(pp.parent.conf.Producer.Retry.Backoff) + } else if pp.highWatermark > 0 { + // we are retrying something (else highWatermark would be 0) but this message is not a *new* retry level + if msg.retries < pp.highWatermark { + // in fact this message is not even the current retry level, so buffer it for now (unless it's a just a fin) + if msg.flags&fin == fin { + pp.retryState[msg.retries].expectChaser = false + pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected + } else { + pp.retryState[msg.retries].buf = append(pp.retryState[msg.retries].buf, msg) + } + continue + } else if msg.flags&fin == fin { + // this message is of the current retry level (msg.retries == highWatermark) and the fin flag is set, + // meaning this retry level is done and we can go down (at least) one level and flush that + pp.retryState[pp.highWatermark].expectChaser = false + pp.flushRetryBuffers() + pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected + continue + } + } + + // if we made it this far then the current msg contains real data, and can be sent to the next goroutine + // without breaking any of our ordering guarantees + + 
if pp.output == nil { + if err := pp.updateLeader(); err != nil { + pp.parent.returnError(msg, err) + time.Sleep(pp.parent.conf.Producer.Retry.Backoff) + continue + } + Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID()) + } + + pp.output <- msg + } + + if pp.output != nil { + pp.parent.unrefBrokerProducer(pp.leader, pp.output) + } +} + +func (pp *partitionProducer) newHighWatermark(hwm int) { + Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, hwm) + pp.highWatermark = hwm + + // send off a fin so that we know when everything "in between" has made it + // back to us and we can safely flush the backlog (otherwise we risk re-ordering messages) + pp.retryState[pp.highWatermark].expectChaser = true + pp.parent.inFlight.Add(1) // we're generating a fin message; track it so we don't shut down while it's still inflight + pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: fin, retries: pp.highWatermark - 1} + + // a new HWM means that our current broker selection is out of date + Logger.Printf("producer/leader/%s/%d abandoning broker %d\n", pp.topic, pp.partition, pp.leader.ID()) + pp.parent.unrefBrokerProducer(pp.leader, pp.output) + pp.output = nil +} + +func (pp *partitionProducer) flushRetryBuffers() { + Logger.Printf("producer/leader/%s/%d state change to [flushing-%d]\n", pp.topic, pp.partition, pp.highWatermark) + for { + pp.highWatermark-- + + if pp.output == nil { + if err := pp.updateLeader(); err != nil { + pp.parent.returnErrors(pp.retryState[pp.highWatermark].buf, err) + goto flushDone + } + Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID()) + } + + for _, msg := range pp.retryState[pp.highWatermark].buf { + pp.output <- msg + } + + flushDone: + pp.retryState[pp.highWatermark].buf = nil + if pp.retryState[pp.highWatermark].expectChaser { + Logger.Printf("producer/leader/%s/%d state change to 
[retrying-%d]\n", pp.topic, pp.partition, pp.highWatermark) + break + } else if pp.highWatermark == 0 { + Logger.Printf("producer/leader/%s/%d state change to [normal]\n", pp.topic, pp.partition) + break + } + } +} + +func (pp *partitionProducer) updateLeader() error { + return pp.breaker.Run(func() (err error) { + if err = pp.parent.client.RefreshMetadata(pp.topic); err != nil { + return err + } + + if pp.leader, err = pp.parent.client.Leader(pp.topic, pp.partition); err != nil { + return err + } + + pp.output = pp.parent.getBrokerProducer(pp.leader) + pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight + pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn} + + return nil + }) +} + +// one per broker; also constructs an associated flusher +func (p *asyncProducer) newBrokerProducer(broker *Broker) chan<- *ProducerMessage { + var ( + input = make(chan *ProducerMessage) + bridge = make(chan *produceSet) + responses = make(chan *brokerProducerResponse) + ) + + bp := &brokerProducer{ + parent: p, + broker: broker, + input: input, + output: bridge, + responses: responses, + buffer: newProduceSet(p), + currentRetries: make(map[string]map[int32]error), + } + go withRecover(bp.run) + + // minimal bridge to make the network response `select`able + go withRecover(func() { + for set := range bridge { + request := set.buildRequest() + + response, err := broker.Produce(request) + + responses <- &brokerProducerResponse{ + set: set, + err: err, + res: response, + } + } + close(responses) + }) + + return input +} + +type brokerProducerResponse struct { + set *produceSet + err error + res *ProduceResponse +} + +// groups messages together into appropriately-sized batches for sending to the broker +// handles state related to retries etc +type brokerProducer struct { + parent *asyncProducer + broker *Broker + + input <-chan *ProducerMessage + output chan<- *produceSet + responses <-chan 
*brokerProducerResponse + + buffer *produceSet + timer <-chan time.Time + timerFired bool + + closing error + currentRetries map[string]map[int32]error +} + +func (bp *brokerProducer) run() { + var output chan<- *produceSet + Logger.Printf("producer/broker/%d starting up\n", bp.broker.ID()) + + for { + select { + case msg := <-bp.input: + if msg == nil { + bp.shutdown() + return + } + + if msg.flags&syn == syn { + Logger.Printf("producer/broker/%d state change to [open] on %s/%d\n", + bp.broker.ID(), msg.Topic, msg.Partition) + if bp.currentRetries[msg.Topic] == nil { + bp.currentRetries[msg.Topic] = make(map[int32]error) + } + bp.currentRetries[msg.Topic][msg.Partition] = nil + bp.parent.inFlight.Done() + continue + } + + if reason := bp.needsRetry(msg); reason != nil { + bp.parent.retryMessage(msg, reason) + + if bp.closing == nil && msg.flags&fin == fin { + // we were retrying this partition but we can start processing again + delete(bp.currentRetries[msg.Topic], msg.Partition) + Logger.Printf("producer/broker/%d state change to [closed] on %s/%d\n", + bp.broker.ID(), msg.Topic, msg.Partition) + } + + continue + } + + if bp.buffer.wouldOverflow(msg) { + if err := bp.waitForSpace(msg); err != nil { + bp.parent.retryMessage(msg, err) + continue + } + } + + if err := bp.buffer.add(msg); err != nil { + bp.parent.returnError(msg, err) + continue + } + + if bp.parent.conf.Producer.Flush.Frequency > 0 && bp.timer == nil { + bp.timer = time.After(bp.parent.conf.Producer.Flush.Frequency) + } + case <-bp.timer: + bp.timerFired = true + case output <- bp.buffer: + bp.rollOver() + case response := <-bp.responses: + bp.handleResponse(response) + } + + if bp.timerFired || bp.buffer.readyToFlush() { + output = bp.output + } else { + output = nil + } + } +} + +func (bp *brokerProducer) shutdown() { + for !bp.buffer.empty() { + select { + case response := <-bp.responses: + bp.handleResponse(response) + case bp.output <- bp.buffer: + bp.rollOver() + } + } + close(bp.output) + for 
response := range bp.responses { + bp.handleResponse(response) + } + + Logger.Printf("producer/broker/%d shut down\n", bp.broker.ID()) +} + +func (bp *brokerProducer) needsRetry(msg *ProducerMessage) error { + if bp.closing != nil { + return bp.closing + } + + return bp.currentRetries[msg.Topic][msg.Partition] +} + +func (bp *brokerProducer) waitForSpace(msg *ProducerMessage) error { + Logger.Printf("producer/broker/%d maximum request accumulated, waiting for space\n", bp.broker.ID()) + + for { + select { + case response := <-bp.responses: + bp.handleResponse(response) + // handling a response can change our state, so re-check some things + if reason := bp.needsRetry(msg); reason != nil { + return reason + } else if !bp.buffer.wouldOverflow(msg) { + return nil + } + case bp.output <- bp.buffer: + bp.rollOver() + return nil + } + } +} + +func (bp *brokerProducer) rollOver() { + bp.timer = nil + bp.timerFired = false + bp.buffer = newProduceSet(bp.parent) +} + +func (bp *brokerProducer) handleResponse(response *brokerProducerResponse) { + if response.err != nil { + bp.handleError(response.set, response.err) + } else { + bp.handleSuccess(response.set, response.res) + } + + if bp.buffer.empty() { + bp.rollOver() // this can happen if the response invalidated our buffer + } +} + +func (bp *brokerProducer) handleSuccess(sent *produceSet, response *ProduceResponse) { + // we iterate through the blocks in the request set, not the response, so that we notice + // if the response is missing a block completely + sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) { + if response == nil { + // this only happens when RequiredAcks is NoResponse, so we have to assume success + bp.parent.returnSuccesses(msgs) + return + } + + block := response.GetBlock(topic, partition) + if block == nil { + bp.parent.returnErrors(msgs, ErrIncompleteResponse) + return + } + + switch block.Err { + // Success + case ErrNoError: + if 
bp.parent.conf.Version.IsAtLeast(V0_10_0_0) && !block.Timestamp.IsZero() { + for _, msg := range msgs { + msg.Timestamp = block.Timestamp + } + } + for i, msg := range msgs { + msg.Offset = block.Offset + int64(i) + } + bp.parent.returnSuccesses(msgs) + // Retriable errors + case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition, + ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend: + Logger.Printf("producer/broker/%d state change to [retrying] on %s/%d because %v\n", + bp.broker.ID(), topic, partition, block.Err) + bp.currentRetries[topic][partition] = block.Err + bp.parent.retryMessages(msgs, block.Err) + bp.parent.retryMessages(bp.buffer.dropPartition(topic, partition), block.Err) + // Other non-retriable errors + default: + bp.parent.returnErrors(msgs, block.Err) + } + }) +} + +func (bp *brokerProducer) handleError(sent *produceSet, err error) { + switch err.(type) { + case PacketEncodingError: + sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) { + bp.parent.returnErrors(msgs, err) + }) + default: + Logger.Printf("producer/broker/%d state change to [closing] because %s\n", bp.broker.ID(), err) + bp.parent.abandonBrokerConnection(bp.broker) + _ = bp.broker.Close() + bp.closing = err + sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) { + bp.parent.retryMessages(msgs, err) + }) + bp.buffer.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) { + bp.parent.retryMessages(msgs, err) + }) + bp.rollOver() + } +} + +// singleton +// effectively a "bridge" between the flushers and the dispatcher in order to avoid deadlock +// based on https://godoc.org/github.com/eapache/channels#InfiniteChannel +func (p *asyncProducer) retryHandler() { + var msg *ProducerMessage + buf := queue.New() + + for { + if buf.Length() == 0 { + msg = <-p.retries + } else { + select { + case msg = <-p.retries: + case p.input <- 
buf.Peek().(*ProducerMessage): + buf.Remove() + continue + } + } + + if msg == nil { + return + } + + buf.Add(msg) + } +} + +// utility functions + +func (p *asyncProducer) shutdown() { + Logger.Println("Producer shutting down.") + p.inFlight.Add(1) + p.input <- &ProducerMessage{flags: shutdown} + + p.inFlight.Wait() + + if p.ownClient { + err := p.client.Close() + if err != nil { + Logger.Println("producer/shutdown failed to close the embedded client:", err) + } + } + + close(p.input) + close(p.retries) + close(p.errors) + close(p.successes) +} + +func (p *asyncProducer) returnError(msg *ProducerMessage, err error) { + msg.clear() + pErr := &ProducerError{Msg: msg, Err: err} + if p.conf.Producer.Return.Errors { + p.errors <- pErr + } else { + Logger.Println(pErr) + } + p.inFlight.Done() +} + +func (p *asyncProducer) returnErrors(batch []*ProducerMessage, err error) { + for _, msg := range batch { + p.returnError(msg, err) + } +} + +func (p *asyncProducer) returnSuccesses(batch []*ProducerMessage) { + for _, msg := range batch { + if p.conf.Producer.Return.Successes { + msg.clear() + p.successes <- msg + } + p.inFlight.Done() + } +} + +func (p *asyncProducer) retryMessage(msg *ProducerMessage, err error) { + if msg.retries >= p.conf.Producer.Retry.Max { + p.returnError(msg, err) + } else { + msg.retries++ + p.retries <- msg + } +} + +func (p *asyncProducer) retryMessages(batch []*ProducerMessage, err error) { + for _, msg := range batch { + p.retryMessage(msg, err) + } +} + +func (p *asyncProducer) getBrokerProducer(broker *Broker) chan<- *ProducerMessage { + p.brokerLock.Lock() + defer p.brokerLock.Unlock() + + bp := p.brokers[broker] + + if bp == nil { + bp = p.newBrokerProducer(broker) + p.brokers[broker] = bp + p.brokerRefs[bp] = 0 + } + + p.brokerRefs[bp]++ + + return bp +} + +func (p *asyncProducer) unrefBrokerProducer(broker *Broker, bp chan<- *ProducerMessage) { + p.brokerLock.Lock() + defer p.brokerLock.Unlock() + + p.brokerRefs[bp]-- + if p.brokerRefs[bp] 
== 0 { + close(bp) + delete(p.brokerRefs, bp) + + if p.brokers[broker] == bp { + delete(p.brokers, broker) + } + } +} + +func (p *asyncProducer) abandonBrokerConnection(broker *Broker) { + p.brokerLock.Lock() + defer p.brokerLock.Unlock() + + delete(p.brokers, broker) +} diff --git a/vendor/github.com/Shopify/sarama/config_resource_type.go b/vendor/github.com/Shopify/sarama/config_resource_type.go new file mode 100644 index 00000000000..848cc9c90c5 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/config_resource_type.go @@ -0,0 +1,15 @@ +package sarama + +type ConfigResourceType int8 + +// Taken from : +// https://cwiki.apache.org/confluence/display/KAFKA/KIP-133%3A+Describe+and+Alter+Configs+Admin+APIs#KIP-133:DescribeandAlterConfigsAdminAPIs-WireFormattypes + +const ( + UnknownResource ConfigResourceType = 0 + AnyResource ConfigResourceType = 1 + TopicResource ConfigResourceType = 2 + GroupResource ConfigResourceType = 3 + ClusterResource ConfigResourceType = 4 + BrokerResource ConfigResourceType = 5 +) diff --git a/vendor/github.com/Shopify/sarama/consumer.go b/vendor/github.com/Shopify/sarama/consumer.go new file mode 100644 index 00000000000..33d9d143f91 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/consumer.go @@ -0,0 +1,807 @@ +package sarama + +import ( + "errors" + "fmt" + "sync" + "sync/atomic" + "time" +) + +// ConsumerMessage encapsulates a Kafka message returned by the consumer. +type ConsumerMessage struct { + Key, Value []byte + Topic string + Partition int32 + Offset int64 + Timestamp time.Time // only set if kafka is version 0.10+, inner message timestamp + BlockTimestamp time.Time // only set if kafka is version 0.10+, outer (compressed) block timestamp + Headers []*RecordHeader // only set if kafka is version 0.11+ +} + +// ConsumerError is what is provided to the user when an error occurs. +// It wraps an error and includes the topic and partition. 
+type ConsumerError struct { + Topic string + Partition int32 + Err error +} + +func (ce ConsumerError) Error() string { + return fmt.Sprintf("kafka: error while consuming %s/%d: %s", ce.Topic, ce.Partition, ce.Err) +} + +// ConsumerErrors is a type that wraps a batch of errors and implements the Error interface. +// It can be returned from the PartitionConsumer's Close methods to avoid the need to manually drain errors +// when stopping. +type ConsumerErrors []*ConsumerError + +func (ce ConsumerErrors) Error() string { + return fmt.Sprintf("kafka: %d errors while consuming", len(ce)) +} + +// Consumer manages PartitionConsumers which process Kafka messages from brokers. You MUST call Close() +// on a consumer to avoid leaks, it will not be garbage-collected automatically when it passes out of +// scope. +// +// Sarama's Consumer type does not currently support automatic consumer-group rebalancing and offset tracking. +// For Zookeeper-based tracking (Kafka 0.8.2 and earlier), the https://github.com/wvanbergen/kafka library +// builds on Sarama to add this support. For Kafka-based tracking (Kafka 0.9 and later), the +// https://github.com/bsm/sarama-cluster library builds on Sarama to add this support. +type Consumer interface { + + // Topics returns the set of available topics as retrieved from the cluster + // metadata. This method is the same as Client.Topics(), and is provided for + // convenience. + Topics() ([]string, error) + + // Partitions returns the sorted list of all partition IDs for the given topic. + // This method is the same as Client.Partitions(), and is provided for convenience. + Partitions(topic string) ([]int32, error) + + // ConsumePartition creates a PartitionConsumer on the given topic/partition with + // the given offset. It will return an error if this Consumer is already consuming + // on the given topic/partition. 
Offset can be a literal offset, or OffsetNewest + // or OffsetOldest + ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error) + + // HighWaterMarks returns the current high water marks for each topic and partition. + // Consistency between partitions is not guaranteed since high water marks are updated separately. + HighWaterMarks() map[string]map[int32]int64 + + // Close shuts down the consumer. It must be called after all child + // PartitionConsumers have already been closed. + Close() error +} + +type consumer struct { + client Client + conf *Config + ownClient bool + + lock sync.Mutex + children map[string]map[int32]*partitionConsumer + brokerConsumers map[*Broker]*brokerConsumer +} + +// NewConsumer creates a new consumer using the given broker addresses and configuration. +func NewConsumer(addrs []string, config *Config) (Consumer, error) { + client, err := NewClient(addrs, config) + if err != nil { + return nil, err + } + + c, err := NewConsumerFromClient(client) + if err != nil { + return nil, err + } + c.(*consumer).ownClient = true + return c, nil +} + +// NewConsumerFromClient creates a new consumer using the given client. It is still +// necessary to call Close() on the underlying client when shutting down this consumer. 
+func NewConsumerFromClient(client Client) (Consumer, error) { + // Check that we are not dealing with a closed Client before processing any other arguments + if client.Closed() { + return nil, ErrClosedClient + } + + c := &consumer{ + client: client, + conf: client.Config(), + children: make(map[string]map[int32]*partitionConsumer), + brokerConsumers: make(map[*Broker]*brokerConsumer), + } + + return c, nil +} + +func (c *consumer) Close() error { + if c.ownClient { + return c.client.Close() + } + return nil +} + +func (c *consumer) Topics() ([]string, error) { + return c.client.Topics() +} + +func (c *consumer) Partitions(topic string) ([]int32, error) { + return c.client.Partitions(topic) +} + +func (c *consumer) ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error) { + child := &partitionConsumer{ + consumer: c, + conf: c.conf, + topic: topic, + partition: partition, + messages: make(chan *ConsumerMessage, c.conf.ChannelBufferSize), + errors: make(chan *ConsumerError, c.conf.ChannelBufferSize), + feeder: make(chan *FetchResponse, 1), + trigger: make(chan none, 1), + dying: make(chan none), + fetchSize: c.conf.Consumer.Fetch.Default, + } + + if err := child.chooseStartingOffset(offset); err != nil { + return nil, err + } + + var leader *Broker + var err error + if leader, err = c.client.Leader(child.topic, child.partition); err != nil { + return nil, err + } + + if err := c.addChild(child); err != nil { + return nil, err + } + + go withRecover(child.dispatcher) + go withRecover(child.responseFeeder) + + child.broker = c.refBrokerConsumer(leader) + child.broker.input <- child + + return child, nil +} + +func (c *consumer) HighWaterMarks() map[string]map[int32]int64 { + c.lock.Lock() + defer c.lock.Unlock() + + hwms := make(map[string]map[int32]int64) + for topic, p := range c.children { + hwm := make(map[int32]int64, len(p)) + for partition, pc := range p { + hwm[partition] = pc.HighWaterMarkOffset() + } + hwms[topic] = hwm + } 
+ + return hwms +} + +func (c *consumer) addChild(child *partitionConsumer) error { + c.lock.Lock() + defer c.lock.Unlock() + + topicChildren := c.children[child.topic] + if topicChildren == nil { + topicChildren = make(map[int32]*partitionConsumer) + c.children[child.topic] = topicChildren + } + + if topicChildren[child.partition] != nil { + return ConfigurationError("That topic/partition is already being consumed") + } + + topicChildren[child.partition] = child + return nil +} + +func (c *consumer) removeChild(child *partitionConsumer) { + c.lock.Lock() + defer c.lock.Unlock() + + delete(c.children[child.topic], child.partition) +} + +func (c *consumer) refBrokerConsumer(broker *Broker) *brokerConsumer { + c.lock.Lock() + defer c.lock.Unlock() + + bc := c.brokerConsumers[broker] + if bc == nil { + bc = c.newBrokerConsumer(broker) + c.brokerConsumers[broker] = bc + } + + bc.refs++ + + return bc +} + +func (c *consumer) unrefBrokerConsumer(brokerWorker *brokerConsumer) { + c.lock.Lock() + defer c.lock.Unlock() + + brokerWorker.refs-- + + if brokerWorker.refs == 0 { + close(brokerWorker.input) + if c.brokerConsumers[brokerWorker.broker] == brokerWorker { + delete(c.brokerConsumers, brokerWorker.broker) + } + } +} + +func (c *consumer) abandonBrokerConsumer(brokerWorker *brokerConsumer) { + c.lock.Lock() + defer c.lock.Unlock() + + delete(c.brokerConsumers, brokerWorker.broker) +} + +// PartitionConsumer + +// PartitionConsumer processes Kafka messages from a given topic and partition. You MUST call one of Close() or +// AsyncClose() on a PartitionConsumer to avoid leaks; it will not be garbage-collected automatically when it passes out +// of scope. +// +// The simplest way of using a PartitionConsumer is to loop over its Messages channel using a for/range +// loop. The PartitionConsumer will only stop itself in one case: when the offset being consumed is reported +// as out of range by the brokers. 
In this case you should decide what you want to do (try a different offset, +// notify a human, etc) and handle it appropriately. For all other error cases, it will just keep retrying. +// By default, it logs these errors to sarama.Logger; if you want to be notified directly of all errors, set +// your config's Consumer.Return.Errors to true and read from the Errors channel, using a select statement +// or a separate goroutine. Check out the Consumer examples to see implementations of these different approaches. +// +// To terminate such a for/range loop while the loop is executing, call AsyncClose. This will kick off the process of +// consumer tear-down & return imediately. Continue to loop, servicing the Messages channel until the teardown process +// AsyncClose initiated closes it (thus terminating the for/range loop). If you've already ceased reading Messages, call +// Close; this will signal the PartitionConsumer's goroutines to begin shutting down (just like AsyncClose), but will +// also drain the Messages channel, harvest all errors & return them once cleanup has completed. +type PartitionConsumer interface { + + // AsyncClose initiates a shutdown of the PartitionConsumer. This method will return immediately, after which you + // should continue to service the 'Messages' and 'Errors' channels until they are empty. It is required to call this + // function, or Close before a consumer object passes out of scope, as it will otherwise leak memory. You must call + // this before calling Close on the underlying client. + AsyncClose() + + // Close stops the PartitionConsumer from fetching messages. It will initiate a shutdown just like AsyncClose, drain + // the Messages channel, harvest any errors & return them to the caller. Note that if you are continuing to service + // the Messages channel when this function is called, you will be competing with Close for messages; consider + // calling AsyncClose, instead. 
It is required to call this function (or AsyncClose) before a consumer object passes + // out of scope, as it will otherwise leak memory. You must call this before calling Close on the underlying client. + Close() error + + // Messages returns the read channel for the messages that are returned by + // the broker. + Messages() <-chan *ConsumerMessage + + // Errors returns a read channel of errors that occurred during consuming, if + // enabled. By default, errors are logged and not returned over this channel. + // If you want to implement any custom error handling, set your config's + // Consumer.Return.Errors setting to true, and read from this channel. + Errors() <-chan *ConsumerError + + // HighWaterMarkOffset returns the high water mark offset of the partition, + // i.e. the offset that will be used for the next message that will be produced. + // You can use this to determine how far behind the processing is. + HighWaterMarkOffset() int64 +} + +type partitionConsumer struct { + highWaterMarkOffset int64 // must be at the top of the struct because https://golang.org/pkg/sync/atomic/#pkg-note-BUG + consumer *consumer + conf *Config + topic string + partition int32 + + broker *brokerConsumer + messages chan *ConsumerMessage + errors chan *ConsumerError + feeder chan *FetchResponse + + trigger, dying chan none + responseResult error + closeOnce sync.Once + + fetchSize int32 + offset int64 +} + +var errTimedOut = errors.New("timed out feeding messages to the user") // not user-facing + +func (child *partitionConsumer) sendError(err error) { + cErr := &ConsumerError{ + Topic: child.topic, + Partition: child.partition, + Err: err, + } + + if child.conf.Consumer.Return.Errors { + child.errors <- cErr + } else { + Logger.Println(cErr) + } +} + +func (child *partitionConsumer) dispatcher() { + for range child.trigger { + select { + case <-child.dying: + close(child.trigger) + case <-time.After(child.conf.Consumer.Retry.Backoff): + if child.broker != nil { + 
child.consumer.unrefBrokerConsumer(child.broker) + child.broker = nil + } + + Logger.Printf("consumer/%s/%d finding new broker\n", child.topic, child.partition) + if err := child.dispatch(); err != nil { + child.sendError(err) + child.trigger <- none{} + } + } + } + + if child.broker != nil { + child.consumer.unrefBrokerConsumer(child.broker) + } + child.consumer.removeChild(child) + close(child.feeder) +} + +func (child *partitionConsumer) dispatch() error { + if err := child.consumer.client.RefreshMetadata(child.topic); err != nil { + return err + } + + var leader *Broker + var err error + if leader, err = child.consumer.client.Leader(child.topic, child.partition); err != nil { + return err + } + + child.broker = child.consumer.refBrokerConsumer(leader) + + child.broker.input <- child + + return nil +} + +func (child *partitionConsumer) chooseStartingOffset(offset int64) error { + newestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetNewest) + if err != nil { + return err + } + oldestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetOldest) + if err != nil { + return err + } + + switch { + case offset == OffsetNewest: + child.offset = newestOffset + case offset == OffsetOldest: + child.offset = oldestOffset + case offset >= oldestOffset && offset <= newestOffset: + child.offset = offset + default: + return ErrOffsetOutOfRange + } + + return nil +} + +func (child *partitionConsumer) Messages() <-chan *ConsumerMessage { + return child.messages +} + +func (child *partitionConsumer) Errors() <-chan *ConsumerError { + return child.errors +} + +func (child *partitionConsumer) AsyncClose() { + // this triggers whatever broker owns this child to abandon it and close its trigger channel, which causes + // the dispatcher to exit its loop, which removes it from the consumer then closes its 'messages' and + // 'errors' channel (alternatively, if the child is already at the dispatcher for some reason, that 
will + // also just close itself) + child.closeOnce.Do(func() { + close(child.dying) + }) +} + +func (child *partitionConsumer) Close() error { + child.AsyncClose() + + go withRecover(func() { + for range child.messages { + // drain + } + }) + + var errors ConsumerErrors + for err := range child.errors { + errors = append(errors, err) + } + + if len(errors) > 0 { + return errors + } + return nil +} + +func (child *partitionConsumer) HighWaterMarkOffset() int64 { + return atomic.LoadInt64(&child.highWaterMarkOffset) +} + +func (child *partitionConsumer) responseFeeder() { + var msgs []*ConsumerMessage + expiryTicker := time.NewTicker(child.conf.Consumer.MaxProcessingTime) + firstAttempt := true + +feederLoop: + for response := range child.feeder { + msgs, child.responseResult = child.parseResponse(response) + + for i, msg := range msgs { + messageSelect: + select { + case child.messages <- msg: + firstAttempt = true + case <-expiryTicker.C: + if !firstAttempt { + child.responseResult = errTimedOut + child.broker.acks.Done() + for _, msg = range msgs[i:] { + child.messages <- msg + } + child.broker.input <- child + continue feederLoop + } else { + // current message has not been sent, return to select + // statement + firstAttempt = false + goto messageSelect + } + } + } + + child.broker.acks.Done() + } + + expiryTicker.Stop() + close(child.messages) + close(child.errors) +} + +func (child *partitionConsumer) parseMessages(msgSet *MessageSet) ([]*ConsumerMessage, error) { + var messages []*ConsumerMessage + for _, msgBlock := range msgSet.Messages { + for _, msg := range msgBlock.Messages() { + offset := msg.Offset + if msg.Msg.Version >= 1 { + baseOffset := msgBlock.Offset - msgBlock.Messages()[len(msgBlock.Messages())-1].Offset + offset += baseOffset + } + if offset < child.offset { + continue + } + messages = append(messages, &ConsumerMessage{ + Topic: child.topic, + Partition: child.partition, + Key: msg.Msg.Key, + Value: msg.Msg.Value, + Offset: offset, + 
Timestamp: msg.Msg.Timestamp, + BlockTimestamp: msgBlock.Msg.Timestamp, + }) + child.offset = offset + 1 + } + } + if len(messages) == 0 { + return nil, ErrIncompleteResponse + } + return messages, nil +} + +func (child *partitionConsumer) parseRecords(batch *RecordBatch) ([]*ConsumerMessage, error) { + var messages []*ConsumerMessage + for _, rec := range batch.Records { + offset := batch.FirstOffset + rec.OffsetDelta + if offset < child.offset { + continue + } + messages = append(messages, &ConsumerMessage{ + Topic: child.topic, + Partition: child.partition, + Key: rec.Key, + Value: rec.Value, + Offset: offset, + Timestamp: batch.FirstTimestamp.Add(rec.TimestampDelta), + Headers: rec.Headers, + }) + child.offset = offset + 1 + } + if len(messages) == 0 { + child.offset += 1 + } + return messages, nil +} + +func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*ConsumerMessage, error) { + block := response.GetBlock(child.topic, child.partition) + if block == nil { + return nil, ErrIncompleteResponse + } + + if block.Err != ErrNoError { + return nil, block.Err + } + + nRecs, err := block.numRecords() + if err != nil { + return nil, err + } + if nRecs == 0 { + partialTrailingMessage, err := block.isPartial() + if err != nil { + return nil, err + } + // We got no messages. If we got a trailing one then we need to ask for more data. + // Otherwise we just poll again and wait for one to be produced... 
+ if partialTrailingMessage { + if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize == child.conf.Consumer.Fetch.Max { + // we can't ask for more data, we've hit the configured limit + child.sendError(ErrMessageTooLarge) + child.offset++ // skip this one so we can keep processing future messages + } else { + child.fetchSize *= 2 + if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize > child.conf.Consumer.Fetch.Max { + child.fetchSize = child.conf.Consumer.Fetch.Max + } + } + } + + return nil, nil + } + + // we got messages, reset our fetch size in case it was increased for a previous request + child.fetchSize = child.conf.Consumer.Fetch.Default + atomic.StoreInt64(&child.highWaterMarkOffset, block.HighWaterMarkOffset) + + messages := []*ConsumerMessage{} + for _, records := range block.RecordsSet { + switch records.recordsType { + case legacyRecords: + messageSetMessages, err := child.parseMessages(records.MsgSet) + if err != nil { + return nil, err + } + + messages = append(messages, messageSetMessages...) + case defaultRecords: + recordBatchMessages, err := child.parseRecords(records.RecordBatch) + if err != nil { + return nil, err + } + if control, err := records.isControl(); err != nil || control { + continue + } + + messages = append(messages, recordBatchMessages...) 
+ default: + return nil, fmt.Errorf("unknown records type: %v", records.recordsType) + } + } + + return messages, nil +} + +// brokerConsumer + +type brokerConsumer struct { + consumer *consumer + broker *Broker + input chan *partitionConsumer + newSubscriptions chan []*partitionConsumer + wait chan none + subscriptions map[*partitionConsumer]none + acks sync.WaitGroup + refs int +} + +func (c *consumer) newBrokerConsumer(broker *Broker) *brokerConsumer { + bc := &brokerConsumer{ + consumer: c, + broker: broker, + input: make(chan *partitionConsumer), + newSubscriptions: make(chan []*partitionConsumer), + wait: make(chan none), + subscriptions: make(map[*partitionConsumer]none), + refs: 0, + } + + go withRecover(bc.subscriptionManager) + go withRecover(bc.subscriptionConsumer) + + return bc +} + +func (bc *brokerConsumer) subscriptionManager() { + var buffer []*partitionConsumer + + // The subscriptionManager constantly accepts new subscriptions on `input` (even when the main subscriptionConsumer + // goroutine is in the middle of a network request) and batches it up. The main worker goroutine picks + // up a batch of new subscriptions between every network request by reading from `newSubscriptions`, so we give + // it nil if no new subscriptions are available. We also write to `wait` only when new subscriptions is available, + // so the main goroutine can block waiting for work if it has none. 
+ for { + if len(buffer) > 0 { + select { + case event, ok := <-bc.input: + if !ok { + goto done + } + buffer = append(buffer, event) + case bc.newSubscriptions <- buffer: + buffer = nil + case bc.wait <- none{}: + } + } else { + select { + case event, ok := <-bc.input: + if !ok { + goto done + } + buffer = append(buffer, event) + case bc.newSubscriptions <- nil: + } + } + } + +done: + close(bc.wait) + if len(buffer) > 0 { + bc.newSubscriptions <- buffer + } + close(bc.newSubscriptions) +} + +func (bc *brokerConsumer) subscriptionConsumer() { + <-bc.wait // wait for our first piece of work + + // the subscriptionConsumer ensures we will get nil right away if no new subscriptions is available + for newSubscriptions := range bc.newSubscriptions { + bc.updateSubscriptions(newSubscriptions) + + if len(bc.subscriptions) == 0 { + // We're about to be shut down or we're about to receive more subscriptions. + // Either way, the signal just hasn't propagated to our goroutine yet. + <-bc.wait + continue + } + + response, err := bc.fetchNewMessages() + + if err != nil { + Logger.Printf("consumer/broker/%d disconnecting due to error processing FetchRequest: %s\n", bc.broker.ID(), err) + bc.abort(err) + return + } + + bc.acks.Add(len(bc.subscriptions)) + for child := range bc.subscriptions { + child.feeder <- response + } + bc.acks.Wait() + bc.handleResponses() + } +} + +func (bc *brokerConsumer) updateSubscriptions(newSubscriptions []*partitionConsumer) { + for _, child := range newSubscriptions { + bc.subscriptions[child] = none{} + Logger.Printf("consumer/broker/%d added subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition) + } + + for child := range bc.subscriptions { + select { + case <-child.dying: + Logger.Printf("consumer/broker/%d closed dead subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition) + close(child.trigger) + delete(bc.subscriptions, child) + default: + break + } + } +} + +func (bc *brokerConsumer) handleResponses() { + // 
handles the response codes left for us by our subscriptions, and abandons ones that have been closed + for child := range bc.subscriptions { + result := child.responseResult + child.responseResult = nil + + switch result { + case nil: + break + case errTimedOut: + Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because consuming was taking too long\n", + bc.broker.ID(), child.topic, child.partition) + delete(bc.subscriptions, child) + case ErrOffsetOutOfRange: + // there's no point in retrying this it will just fail the same way again + // shut it down and force the user to choose what to do + child.sendError(result) + Logger.Printf("consumer/%s/%d shutting down because %s\n", child.topic, child.partition, result) + close(child.trigger) + delete(bc.subscriptions, child) + case ErrUnknownTopicOrPartition, ErrNotLeaderForPartition, ErrLeaderNotAvailable, ErrReplicaNotAvailable: + // not an error, but does need redispatching + Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n", + bc.broker.ID(), child.topic, child.partition, result) + child.trigger <- none{} + delete(bc.subscriptions, child) + default: + // dunno, tell the user and try redispatching + child.sendError(result) + Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n", + bc.broker.ID(), child.topic, child.partition, result) + child.trigger <- none{} + delete(bc.subscriptions, child) + } + } +} + +func (bc *brokerConsumer) abort(err error) { + bc.consumer.abandonBrokerConsumer(bc) + _ = bc.broker.Close() // we don't care about the error this might return, we already have one + + for child := range bc.subscriptions { + child.sendError(err) + child.trigger <- none{} + } + + for newSubscriptions := range bc.newSubscriptions { + if len(newSubscriptions) == 0 { + <-bc.wait + continue + } + for _, child := range newSubscriptions { + child.sendError(err) + child.trigger <- none{} + } + } +} + +func (bc *brokerConsumer) fetchNewMessages() 
(*FetchResponse, error) { + request := &FetchRequest{ + MinBytes: bc.consumer.conf.Consumer.Fetch.Min, + MaxWaitTime: int32(bc.consumer.conf.Consumer.MaxWaitTime / time.Millisecond), + } + if bc.consumer.conf.Version.IsAtLeast(V0_10_0_0) { + request.Version = 2 + } + if bc.consumer.conf.Version.IsAtLeast(V0_10_1_0) { + request.Version = 3 + request.MaxBytes = MaxResponseSize + } + if bc.consumer.conf.Version.IsAtLeast(V0_11_0_0) { + request.Version = 4 + request.Isolation = ReadUncommitted // We don't support yet transactions. + } + + for child := range bc.subscriptions { + request.AddBlock(child.topic, child.partition, child.offset, child.fetchSize) + } + + return bc.broker.Fetch(request) +} diff --git a/vendor/github.com/Shopify/sarama/consumer_group_members.go b/vendor/github.com/Shopify/sarama/consumer_group_members.go new file mode 100644 index 00000000000..9d92d350a5d --- /dev/null +++ b/vendor/github.com/Shopify/sarama/consumer_group_members.go @@ -0,0 +1,94 @@ +package sarama + +type ConsumerGroupMemberMetadata struct { + Version int16 + Topics []string + UserData []byte +} + +func (m *ConsumerGroupMemberMetadata) encode(pe packetEncoder) error { + pe.putInt16(m.Version) + + if err := pe.putStringArray(m.Topics); err != nil { + return err + } + + if err := pe.putBytes(m.UserData); err != nil { + return err + } + + return nil +} + +func (m *ConsumerGroupMemberMetadata) decode(pd packetDecoder) (err error) { + if m.Version, err = pd.getInt16(); err != nil { + return + } + + if m.Topics, err = pd.getStringArray(); err != nil { + return + } + + if m.UserData, err = pd.getBytes(); err != nil { + return + } + + return nil +} + +type ConsumerGroupMemberAssignment struct { + Version int16 + Topics map[string][]int32 + UserData []byte +} + +func (m *ConsumerGroupMemberAssignment) encode(pe packetEncoder) error { + pe.putInt16(m.Version) + + if err := pe.putArrayLength(len(m.Topics)); err != nil { + return err + } + + for topic, partitions := range m.Topics { + if 
err := pe.putString(topic); err != nil { + return err + } + if err := pe.putInt32Array(partitions); err != nil { + return err + } + } + + if err := pe.putBytes(m.UserData); err != nil { + return err + } + + return nil +} + +func (m *ConsumerGroupMemberAssignment) decode(pd packetDecoder) (err error) { + if m.Version, err = pd.getInt16(); err != nil { + return + } + + var topicLen int + if topicLen, err = pd.getArrayLength(); err != nil { + return + } + + m.Topics = make(map[string][]int32, topicLen) + for i := 0; i < topicLen; i++ { + var topic string + if topic, err = pd.getString(); err != nil { + return + } + if m.Topics[topic], err = pd.getInt32Array(); err != nil { + return + } + } + + if m.UserData, err = pd.getBytes(); err != nil { + return + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_request.go b/vendor/github.com/Shopify/sarama/consumer_metadata_request.go new file mode 100644 index 00000000000..4de45e7bf50 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/consumer_metadata_request.go @@ -0,0 +1,33 @@ +package sarama + +type ConsumerMetadataRequest struct { + ConsumerGroup string +} + +func (r *ConsumerMetadataRequest) encode(pe packetEncoder) error { + tmp := new(FindCoordinatorRequest) + tmp.CoordinatorKey = r.ConsumerGroup + tmp.CoordinatorType = CoordinatorGroup + return tmp.encode(pe) +} + +func (r *ConsumerMetadataRequest) decode(pd packetDecoder, version int16) (err error) { + tmp := new(FindCoordinatorRequest) + if err := tmp.decode(pd, version); err != nil { + return err + } + r.ConsumerGroup = tmp.CoordinatorKey + return nil +} + +func (r *ConsumerMetadataRequest) key() int16 { + return 10 +} + +func (r *ConsumerMetadataRequest) version() int16 { + return 0 +} + +func (r *ConsumerMetadataRequest) requiredVersion() KafkaVersion { + return V0_8_2_0 +} diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_response.go b/vendor/github.com/Shopify/sarama/consumer_metadata_response.go new file mode 
100644 index 00000000000..442cbde7ac0 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/consumer_metadata_response.go @@ -0,0 +1,77 @@ +package sarama + +import ( + "net" + "strconv" +) + +type ConsumerMetadataResponse struct { + Err KError + Coordinator *Broker + CoordinatorID int32 // deprecated: use Coordinator.ID() + CoordinatorHost string // deprecated: use Coordinator.Addr() + CoordinatorPort int32 // deprecated: use Coordinator.Addr() +} + +func (r *ConsumerMetadataResponse) decode(pd packetDecoder, version int16) (err error) { + tmp := new(FindCoordinatorResponse) + + if err := tmp.decode(pd, version); err != nil { + return err + } + + r.Err = tmp.Err + + r.Coordinator = tmp.Coordinator + if tmp.Coordinator == nil { + return nil + } + + // this can all go away in 2.0, but we have to fill in deprecated fields to maintain + // backwards compatibility + host, portstr, err := net.SplitHostPort(r.Coordinator.Addr()) + if err != nil { + return err + } + port, err := strconv.ParseInt(portstr, 10, 32) + if err != nil { + return err + } + r.CoordinatorID = r.Coordinator.ID() + r.CoordinatorHost = host + r.CoordinatorPort = int32(port) + + return nil +} + +func (r *ConsumerMetadataResponse) encode(pe packetEncoder) error { + if r.Coordinator == nil { + r.Coordinator = new(Broker) + r.Coordinator.id = r.CoordinatorID + r.Coordinator.addr = net.JoinHostPort(r.CoordinatorHost, strconv.Itoa(int(r.CoordinatorPort))) + } + + tmp := &FindCoordinatorResponse{ + Version: 0, + Err: r.Err, + Coordinator: r.Coordinator, + } + + if err := tmp.encode(pe); err != nil { + return err + } + + return nil +} + +func (r *ConsumerMetadataResponse) key() int16 { + return 10 +} + +func (r *ConsumerMetadataResponse) version() int16 { + return 0 +} + +func (r *ConsumerMetadataResponse) requiredVersion() KafkaVersion { + return V0_8_2_0 +} diff --git a/vendor/github.com/Shopify/sarama/crc32_field.go b/vendor/github.com/Shopify/sarama/crc32_field.go new file mode 100644 index 
00000000000..1f144431a8b --- /dev/null +++ b/vendor/github.com/Shopify/sarama/crc32_field.go @@ -0,0 +1,69 @@ +package sarama + +import ( + "encoding/binary" + "fmt" + "hash/crc32" +) + +type crcPolynomial int8 + +const ( + crcIEEE crcPolynomial = iota + crcCastagnoli +) + +var castagnoliTable = crc32.MakeTable(crc32.Castagnoli) + +// crc32Field implements the pushEncoder and pushDecoder interfaces for calculating CRC32s. +type crc32Field struct { + startOffset int + polynomial crcPolynomial +} + +func (c *crc32Field) saveOffset(in int) { + c.startOffset = in +} + +func (c *crc32Field) reserveLength() int { + return 4 +} + +func newCRC32Field(polynomial crcPolynomial) *crc32Field { + return &crc32Field{polynomial: polynomial} +} + +func (c *crc32Field) run(curOffset int, buf []byte) error { + crc, err := c.crc(curOffset, buf) + if err != nil { + return err + } + binary.BigEndian.PutUint32(buf[c.startOffset:], crc) + return nil +} + +func (c *crc32Field) check(curOffset int, buf []byte) error { + crc, err := c.crc(curOffset, buf) + if err != nil { + return err + } + + expected := binary.BigEndian.Uint32(buf[c.startOffset:]) + if crc != expected { + return PacketDecodingError{fmt.Sprintf("CRC didn't match expected %#x got %#x", expected, crc)} + } + + return nil +} +func (c *crc32Field) crc(curOffset int, buf []byte) (uint32, error) { + var tab *crc32.Table + switch c.polynomial { + case crcIEEE: + tab = crc32.IEEETable + case crcCastagnoli: + tab = castagnoliTable + default: + return 0, PacketDecodingError{"invalid CRC type"} + } + return crc32.Checksum(buf[c.startOffset+4:curOffset], tab), nil +} diff --git a/vendor/github.com/Shopify/sarama/create_partitions_request.go b/vendor/github.com/Shopify/sarama/create_partitions_request.go new file mode 100644 index 00000000000..af321e99466 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/create_partitions_request.go @@ -0,0 +1,121 @@ +package sarama + +import "time" + +type CreatePartitionsRequest struct { + 
TopicPartitions map[string]*TopicPartition + Timeout time.Duration + ValidateOnly bool +} + +func (c *CreatePartitionsRequest) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(c.TopicPartitions)); err != nil { + return err + } + + for topic, partition := range c.TopicPartitions { + if err := pe.putString(topic); err != nil { + return err + } + if err := partition.encode(pe); err != nil { + return err + } + } + + pe.putInt32(int32(c.Timeout / time.Millisecond)) + + pe.putBool(c.ValidateOnly) + + return nil +} + +func (c *CreatePartitionsRequest) decode(pd packetDecoder, version int16) (err error) { + n, err := pd.getArrayLength() + if err != nil { + return err + } + c.TopicPartitions = make(map[string]*TopicPartition, n) + for i := 0; i < n; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + c.TopicPartitions[topic] = new(TopicPartition) + if err := c.TopicPartitions[topic].decode(pd, version); err != nil { + return err + } + } + + timeout, err := pd.getInt32() + if err != nil { + return err + } + c.Timeout = time.Duration(timeout) * time.Millisecond + + if c.ValidateOnly, err = pd.getBool(); err != nil { + return err + } + + return nil +} + +func (r *CreatePartitionsRequest) key() int16 { + return 37 +} + +func (r *CreatePartitionsRequest) version() int16 { + return 0 +} + +func (r *CreatePartitionsRequest) requiredVersion() KafkaVersion { + return V1_0_0_0 +} + +type TopicPartition struct { + Count int32 + Assignment [][]int32 +} + +func (t *TopicPartition) encode(pe packetEncoder) error { + pe.putInt32(t.Count) + + if len(t.Assignment) == 0 { + pe.putInt32(-1) + return nil + } + + if err := pe.putArrayLength(len(t.Assignment)); err != nil { + return err + } + + for _, assign := range t.Assignment { + if err := pe.putInt32Array(assign); err != nil { + return err + } + } + + return nil +} + +func (t *TopicPartition) decode(pd packetDecoder, version int16) (err error) { + if t.Count, err = pd.getInt32(); err != nil { + return 
err + } + + n, err := pd.getInt32() + if err != nil { + return err + } + if n <= 0 { + return nil + } + t.Assignment = make([][]int32, n) + + for i := 0; i < int(n); i++ { + if t.Assignment[i], err = pd.getInt32Array(); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/create_partitions_response.go b/vendor/github.com/Shopify/sarama/create_partitions_response.go new file mode 100644 index 00000000000..abd621c64ec --- /dev/null +++ b/vendor/github.com/Shopify/sarama/create_partitions_response.go @@ -0,0 +1,94 @@ +package sarama + +import "time" + +type CreatePartitionsResponse struct { + ThrottleTime time.Duration + TopicPartitionErrors map[string]*TopicPartitionError +} + +func (c *CreatePartitionsResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(c.ThrottleTime / time.Millisecond)) + if err := pe.putArrayLength(len(c.TopicPartitionErrors)); err != nil { + return err + } + + for topic, partitionError := range c.TopicPartitionErrors { + if err := pe.putString(topic); err != nil { + return err + } + if err := partitionError.encode(pe); err != nil { + return err + } + } + + return nil +} + +func (c *CreatePartitionsResponse) decode(pd packetDecoder, version int16) (err error) { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + c.TopicPartitionErrors = make(map[string]*TopicPartitionError, n) + for i := 0; i < n; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + c.TopicPartitionErrors[topic] = new(TopicPartitionError) + if err := c.TopicPartitionErrors[topic].decode(pd, version); err != nil { + return err + } + } + + return nil +} + +func (r *CreatePartitionsResponse) key() int16 { + return 37 +} + +func (r *CreatePartitionsResponse) version() int16 { + return 0 +} + +func (r *CreatePartitionsResponse) requiredVersion() 
KafkaVersion { + return V1_0_0_0 +} + +type TopicPartitionError struct { + Err KError + ErrMsg *string +} + +func (t *TopicPartitionError) encode(pe packetEncoder) error { + pe.putInt16(int16(t.Err)) + + if err := pe.putNullableString(t.ErrMsg); err != nil { + return err + } + + return nil +} + +func (t *TopicPartitionError) decode(pd packetDecoder, version int16) (err error) { + kerr, err := pd.getInt16() + if err != nil { + return err + } + t.Err = KError(kerr) + + if t.ErrMsg, err = pd.getNullableString(); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/create_topics_request.go b/vendor/github.com/Shopify/sarama/create_topics_request.go new file mode 100644 index 00000000000..709c0a44e71 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/create_topics_request.go @@ -0,0 +1,174 @@ +package sarama + +import ( + "time" +) + +type CreateTopicsRequest struct { + Version int16 + + TopicDetails map[string]*TopicDetail + Timeout time.Duration + ValidateOnly bool +} + +func (c *CreateTopicsRequest) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(c.TopicDetails)); err != nil { + return err + } + for topic, detail := range c.TopicDetails { + if err := pe.putString(topic); err != nil { + return err + } + if err := detail.encode(pe); err != nil { + return err + } + } + + pe.putInt32(int32(c.Timeout / time.Millisecond)) + + if c.Version >= 1 { + pe.putBool(c.ValidateOnly) + } + + return nil +} + +func (c *CreateTopicsRequest) decode(pd packetDecoder, version int16) (err error) { + n, err := pd.getArrayLength() + if err != nil { + return err + } + + c.TopicDetails = make(map[string]*TopicDetail, n) + + for i := 0; i < n; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + c.TopicDetails[topic] = new(TopicDetail) + if err = c.TopicDetails[topic].decode(pd, version); err != nil { + return err + } + } + + timeout, err := pd.getInt32() + if err != nil { + return err + } + c.Timeout = 
time.Duration(timeout) * time.Millisecond + + if version >= 1 { + c.ValidateOnly, err = pd.getBool() + if err != nil { + return err + } + + c.Version = version + } + + return nil +} + +func (c *CreateTopicsRequest) key() int16 { + return 19 +} + +func (c *CreateTopicsRequest) version() int16 { + return c.Version +} + +func (c *CreateTopicsRequest) requiredVersion() KafkaVersion { + switch c.Version { + case 2: + return V1_0_0_0 + case 1: + return V0_11_0_0 + default: + return V0_10_1_0 + } +} + +type TopicDetail struct { + NumPartitions int32 + ReplicationFactor int16 + ReplicaAssignment map[int32][]int32 + ConfigEntries map[string]*string +} + +func (t *TopicDetail) encode(pe packetEncoder) error { + pe.putInt32(t.NumPartitions) + pe.putInt16(t.ReplicationFactor) + + if err := pe.putArrayLength(len(t.ReplicaAssignment)); err != nil { + return err + } + for partition, assignment := range t.ReplicaAssignment { + pe.putInt32(partition) + if err := pe.putInt32Array(assignment); err != nil { + return err + } + } + + if err := pe.putArrayLength(len(t.ConfigEntries)); err != nil { + return err + } + for configKey, configValue := range t.ConfigEntries { + if err := pe.putString(configKey); err != nil { + return err + } + if err := pe.putNullableString(configValue); err != nil { + return err + } + } + + return nil +} + +func (t *TopicDetail) decode(pd packetDecoder, version int16) (err error) { + if t.NumPartitions, err = pd.getInt32(); err != nil { + return err + } + if t.ReplicationFactor, err = pd.getInt16(); err != nil { + return err + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + if n > 0 { + t.ReplicaAssignment = make(map[int32][]int32, n) + for i := 0; i < n; i++ { + replica, err := pd.getInt32() + if err != nil { + return err + } + if t.ReplicaAssignment[replica], err = pd.getInt32Array(); err != nil { + return err + } + } + } + + n, err = pd.getArrayLength() + if err != nil { + return err + } + + if n > 0 { + t.ConfigEntries = 
make(map[string]*string, n) + for i := 0; i < n; i++ { + configKey, err := pd.getString() + if err != nil { + return err + } + if t.ConfigEntries[configKey], err = pd.getNullableString(); err != nil { + return err + } + } + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/create_topics_response.go b/vendor/github.com/Shopify/sarama/create_topics_response.go new file mode 100644 index 00000000000..66207e00c5d --- /dev/null +++ b/vendor/github.com/Shopify/sarama/create_topics_response.go @@ -0,0 +1,112 @@ +package sarama + +import "time" + +type CreateTopicsResponse struct { + Version int16 + ThrottleTime time.Duration + TopicErrors map[string]*TopicError +} + +func (c *CreateTopicsResponse) encode(pe packetEncoder) error { + if c.Version >= 2 { + pe.putInt32(int32(c.ThrottleTime / time.Millisecond)) + } + + if err := pe.putArrayLength(len(c.TopicErrors)); err != nil { + return err + } + for topic, topicError := range c.TopicErrors { + if err := pe.putString(topic); err != nil { + return err + } + if err := topicError.encode(pe, c.Version); err != nil { + return err + } + } + + return nil +} + +func (c *CreateTopicsResponse) decode(pd packetDecoder, version int16) (err error) { + c.Version = version + + if version >= 2 { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + c.TopicErrors = make(map[string]*TopicError, n) + for i := 0; i < n; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + c.TopicErrors[topic] = new(TopicError) + if err := c.TopicErrors[topic].decode(pd, version); err != nil { + return err + } + } + + return nil +} + +func (c *CreateTopicsResponse) key() int16 { + return 19 +} + +func (c *CreateTopicsResponse) version() int16 { + return c.Version +} + +func (c *CreateTopicsResponse) requiredVersion() KafkaVersion { + switch c.Version { + case 2: + 
return V1_0_0_0 + case 1: + return V0_11_0_0 + default: + return V0_10_1_0 + } +} + +type TopicError struct { + Err KError + ErrMsg *string +} + +func (t *TopicError) encode(pe packetEncoder, version int16) error { + pe.putInt16(int16(t.Err)) + + if version >= 1 { + if err := pe.putNullableString(t.ErrMsg); err != nil { + return err + } + } + + return nil +} + +func (t *TopicError) decode(pd packetDecoder, version int16) (err error) { + kErr, err := pd.getInt16() + if err != nil { + return err + } + t.Err = KError(kErr) + + if version >= 1 { + if t.ErrMsg, err = pd.getNullableString(); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/delete_groups_request.go b/vendor/github.com/Shopify/sarama/delete_groups_request.go new file mode 100644 index 00000000000..305a324ac2d --- /dev/null +++ b/vendor/github.com/Shopify/sarama/delete_groups_request.go @@ -0,0 +1,30 @@ +package sarama + +type DeleteGroupsRequest struct { + Groups []string +} + +func (r *DeleteGroupsRequest) encode(pe packetEncoder) error { + return pe.putStringArray(r.Groups) +} + +func (r *DeleteGroupsRequest) decode(pd packetDecoder, version int16) (err error) { + r.Groups, err = pd.getStringArray() + return +} + +func (r *DeleteGroupsRequest) key() int16 { + return 42 +} + +func (r *DeleteGroupsRequest) version() int16 { + return 0 +} + +func (r *DeleteGroupsRequest) requiredVersion() KafkaVersion { + return V1_1_0_0 +} + +func (r *DeleteGroupsRequest) AddGroup(group string) { + r.Groups = append(r.Groups, group) +} diff --git a/vendor/github.com/Shopify/sarama/delete_groups_response.go b/vendor/github.com/Shopify/sarama/delete_groups_response.go new file mode 100644 index 00000000000..c067ebb42b0 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/delete_groups_response.go @@ -0,0 +1,70 @@ +package sarama + +import ( + "time" +) + +type DeleteGroupsResponse struct { + ThrottleTime time.Duration + GroupErrorCodes map[string]KError +} + +func (r 
*DeleteGroupsResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(r.ThrottleTime / time.Millisecond)) + + if err := pe.putArrayLength(len(r.GroupErrorCodes)); err != nil { + return err + } + for groupID, errorCode := range r.GroupErrorCodes { + if err := pe.putString(groupID); err != nil { + return err + } + pe.putInt16(int16(errorCode)) + } + + return nil +} + +func (r *DeleteGroupsResponse) decode(pd packetDecoder, version int16) error { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + n, err := pd.getArrayLength() + if err != nil { + return err + } + if n == 0 { + return nil + } + + r.GroupErrorCodes = make(map[string]KError, n) + for i := 0; i < n; i++ { + groupID, err := pd.getString() + if err != nil { + return err + } + errorCode, err := pd.getInt16() + if err != nil { + return err + } + + r.GroupErrorCodes[groupID] = KError(errorCode) + } + + return nil +} + +func (r *DeleteGroupsResponse) key() int16 { + return 42 +} + +func (r *DeleteGroupsResponse) version() int16 { + return 0 +} + +func (r *DeleteGroupsResponse) requiredVersion() KafkaVersion { + return V1_1_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/delete_records_request.go b/vendor/github.com/Shopify/sarama/delete_records_request.go new file mode 100644 index 00000000000..93efafd4d0b --- /dev/null +++ b/vendor/github.com/Shopify/sarama/delete_records_request.go @@ -0,0 +1,126 @@ +package sarama + +import ( + "sort" + "time" +) + +// request message format is: +// [topic] timeout(int32) +// where topic is: +// name(string) [partition] +// where partition is: +// id(int32) offset(int64) + +type DeleteRecordsRequest struct { + Topics map[string]*DeleteRecordsRequestTopic + Timeout time.Duration +} + +func (d *DeleteRecordsRequest) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(d.Topics)); err != nil { + return err + } + keys := make([]string, 0, len(d.Topics)) + for topic 
:= range d.Topics { + keys = append(keys, topic) + } + sort.Strings(keys) + for _, topic := range keys { + if err := pe.putString(topic); err != nil { + return err + } + if err := d.Topics[topic].encode(pe); err != nil { + return err + } + } + pe.putInt32(int32(d.Timeout / time.Millisecond)) + + return nil +} + +func (d *DeleteRecordsRequest) decode(pd packetDecoder, version int16) error { + n, err := pd.getArrayLength() + if err != nil { + return err + } + + if n > 0 { + d.Topics = make(map[string]*DeleteRecordsRequestTopic, n) + for i := 0; i < n; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + details := new(DeleteRecordsRequestTopic) + if err = details.decode(pd, version); err != nil { + return err + } + d.Topics[topic] = details + } + } + + timeout, err := pd.getInt32() + if err != nil { + return err + } + d.Timeout = time.Duration(timeout) * time.Millisecond + + return nil +} + +func (d *DeleteRecordsRequest) key() int16 { + return 21 +} + +func (d *DeleteRecordsRequest) version() int16 { + return 0 +} + +func (d *DeleteRecordsRequest) requiredVersion() KafkaVersion { + return V0_11_0_0 +} + +type DeleteRecordsRequestTopic struct { + PartitionOffsets map[int32]int64 // partition => offset +} + +func (t *DeleteRecordsRequestTopic) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(t.PartitionOffsets)); err != nil { + return err + } + keys := make([]int32, 0, len(t.PartitionOffsets)) + for partition := range t.PartitionOffsets { + keys = append(keys, partition) + } + sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] }) + for _, partition := range keys { + pe.putInt32(partition) + pe.putInt64(t.PartitionOffsets[partition]) + } + return nil +} + +func (t *DeleteRecordsRequestTopic) decode(pd packetDecoder, version int16) error { + n, err := pd.getArrayLength() + if err != nil { + return err + } + + if n > 0 { + t.PartitionOffsets = make(map[int32]int64, n) + for i := 0; i < n; i++ { + partition, err := 
pd.getInt32() + if err != nil { + return err + } + offset, err := pd.getInt64() + if err != nil { + return err + } + t.PartitionOffsets[partition] = offset + } + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/delete_records_response.go b/vendor/github.com/Shopify/sarama/delete_records_response.go new file mode 100644 index 00000000000..733a58b6bc3 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/delete_records_response.go @@ -0,0 +1,158 @@ +package sarama + +import ( + "sort" + "time" +) + +// response message format is: +// throttleMs(int32) [topic] +// where topic is: +// name(string) [partition] +// where partition is: +// id(int32) low_watermark(int64) error_code(int16) + +type DeleteRecordsResponse struct { + Version int16 + ThrottleTime time.Duration + Topics map[string]*DeleteRecordsResponseTopic +} + +func (d *DeleteRecordsResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(d.ThrottleTime / time.Millisecond)) + + if err := pe.putArrayLength(len(d.Topics)); err != nil { + return err + } + keys := make([]string, 0, len(d.Topics)) + for topic := range d.Topics { + keys = append(keys, topic) + } + sort.Strings(keys) + for _, topic := range keys { + if err := pe.putString(topic); err != nil { + return err + } + if err := d.Topics[topic].encode(pe); err != nil { + return err + } + } + return nil +} + +func (d *DeleteRecordsResponse) decode(pd packetDecoder, version int16) error { + d.Version = version + + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + if n > 0 { + d.Topics = make(map[string]*DeleteRecordsResponseTopic, n) + for i := 0; i < n; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + details := new(DeleteRecordsResponseTopic) + if err = details.decode(pd, version); err != nil { + return err + } + d.Topics[topic] = details + } + } + + 
return nil +} + +func (d *DeleteRecordsResponse) key() int16 { + return 21 +} + +func (d *DeleteRecordsResponse) version() int16 { + return 0 +} + +func (d *DeleteRecordsResponse) requiredVersion() KafkaVersion { + return V0_11_0_0 +} + +type DeleteRecordsResponseTopic struct { + Partitions map[int32]*DeleteRecordsResponsePartition +} + +func (t *DeleteRecordsResponseTopic) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(t.Partitions)); err != nil { + return err + } + keys := make([]int32, 0, len(t.Partitions)) + for partition := range t.Partitions { + keys = append(keys, partition) + } + sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] }) + for _, partition := range keys { + pe.putInt32(partition) + if err := t.Partitions[partition].encode(pe); err != nil { + return err + } + } + return nil +} + +func (t *DeleteRecordsResponseTopic) decode(pd packetDecoder, version int16) error { + n, err := pd.getArrayLength() + if err != nil { + return err + } + + if n > 0 { + t.Partitions = make(map[int32]*DeleteRecordsResponsePartition, n) + for i := 0; i < n; i++ { + partition, err := pd.getInt32() + if err != nil { + return err + } + details := new(DeleteRecordsResponsePartition) + if err = details.decode(pd, version); err != nil { + return err + } + t.Partitions[partition] = details + } + } + + return nil +} + +type DeleteRecordsResponsePartition struct { + LowWatermark int64 + Err KError +} + +func (t *DeleteRecordsResponsePartition) encode(pe packetEncoder) error { + pe.putInt64(t.LowWatermark) + pe.putInt16(int16(t.Err)) + return nil +} + +func (t *DeleteRecordsResponsePartition) decode(pd packetDecoder, version int16) error { + lowWatermark, err := pd.getInt64() + if err != nil { + return err + } + t.LowWatermark = lowWatermark + + kErr, err := pd.getInt16() + if err != nil { + return err + } + t.Err = KError(kErr) + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/delete_topics_request.go 
b/vendor/github.com/Shopify/sarama/delete_topics_request.go new file mode 100644 index 00000000000..911f67d31ba --- /dev/null +++ b/vendor/github.com/Shopify/sarama/delete_topics_request.go @@ -0,0 +1,48 @@ +package sarama + +import "time" + +type DeleteTopicsRequest struct { + Version int16 + Topics []string + Timeout time.Duration +} + +func (d *DeleteTopicsRequest) encode(pe packetEncoder) error { + if err := pe.putStringArray(d.Topics); err != nil { + return err + } + pe.putInt32(int32(d.Timeout / time.Millisecond)) + + return nil +} + +func (d *DeleteTopicsRequest) decode(pd packetDecoder, version int16) (err error) { + if d.Topics, err = pd.getStringArray(); err != nil { + return err + } + timeout, err := pd.getInt32() + if err != nil { + return err + } + d.Timeout = time.Duration(timeout) * time.Millisecond + d.Version = version + return nil +} + +func (d *DeleteTopicsRequest) key() int16 { + return 20 +} + +func (d *DeleteTopicsRequest) version() int16 { + return d.Version +} + +func (d *DeleteTopicsRequest) requiredVersion() KafkaVersion { + switch d.Version { + case 1: + return V0_11_0_0 + default: + return V0_10_1_0 + } +} diff --git a/vendor/github.com/Shopify/sarama/delete_topics_response.go b/vendor/github.com/Shopify/sarama/delete_topics_response.go new file mode 100644 index 00000000000..34225460a31 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/delete_topics_response.go @@ -0,0 +1,78 @@ +package sarama + +import "time" + +type DeleteTopicsResponse struct { + Version int16 + ThrottleTime time.Duration + TopicErrorCodes map[string]KError +} + +func (d *DeleteTopicsResponse) encode(pe packetEncoder) error { + if d.Version >= 1 { + pe.putInt32(int32(d.ThrottleTime / time.Millisecond)) + } + + if err := pe.putArrayLength(len(d.TopicErrorCodes)); err != nil { + return err + } + for topic, errorCode := range d.TopicErrorCodes { + if err := pe.putString(topic); err != nil { + return err + } + pe.putInt16(int16(errorCode)) + } + + return nil +} + 
+func (d *DeleteTopicsResponse) decode(pd packetDecoder, version int16) (err error) { + if version >= 1 { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + d.Version = version + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + d.TopicErrorCodes = make(map[string]KError, n) + + for i := 0; i < n; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + errorCode, err := pd.getInt16() + if err != nil { + return err + } + + d.TopicErrorCodes[topic] = KError(errorCode) + } + + return nil +} + +func (d *DeleteTopicsResponse) key() int16 { + return 20 +} + +func (d *DeleteTopicsResponse) version() int16 { + return d.Version +} + +func (d *DeleteTopicsResponse) requiredVersion() KafkaVersion { + switch d.Version { + case 1: + return V0_11_0_0 + default: + return V0_10_1_0 + } +} diff --git a/vendor/github.com/Shopify/sarama/describe_configs_request.go b/vendor/github.com/Shopify/sarama/describe_configs_request.go new file mode 100644 index 00000000000..7a7cffc3fb2 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/describe_configs_request.go @@ -0,0 +1,91 @@ +package sarama + +type ConfigResource struct { + Type ConfigResourceType + Name string + ConfigNames []string +} + +type DescribeConfigsRequest struct { + Resources []*ConfigResource +} + +func (r *DescribeConfigsRequest) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(r.Resources)); err != nil { + return err + } + + for _, c := range r.Resources { + pe.putInt8(int8(c.Type)) + if err := pe.putString(c.Name); err != nil { + return err + } + + if len(c.ConfigNames) == 0 { + pe.putInt32(-1) + continue + } + if err := pe.putStringArray(c.ConfigNames); err != nil { + return err + } + } + + return nil +} + +func (r *DescribeConfigsRequest) decode(pd packetDecoder, version int16) (err error) { + n, err := pd.getArrayLength() + if err != nil { + return err + } + + 
r.Resources = make([]*ConfigResource, n) + + for i := 0; i < n; i++ { + r.Resources[i] = &ConfigResource{} + t, err := pd.getInt8() + if err != nil { + return err + } + r.Resources[i].Type = ConfigResourceType(t) + name, err := pd.getString() + if err != nil { + return err + } + r.Resources[i].Name = name + + confLength, err := pd.getArrayLength() + + if err != nil { + return err + } + + if confLength == -1 { + continue + } + + cfnames := make([]string, confLength) + for i := 0; i < confLength; i++ { + s, err := pd.getString() + if err != nil { + return err + } + cfnames[i] = s + } + r.Resources[i].ConfigNames = cfnames + } + + return nil +} + +func (r *DescribeConfigsRequest) key() int16 { + return 32 +} + +func (r *DescribeConfigsRequest) version() int16 { + return 0 +} + +func (r *DescribeConfigsRequest) requiredVersion() KafkaVersion { + return V0_11_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/describe_configs_response.go b/vendor/github.com/Shopify/sarama/describe_configs_response.go new file mode 100644 index 00000000000..6e5d30e4f09 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/describe_configs_response.go @@ -0,0 +1,188 @@ +package sarama + +import "time" + +type DescribeConfigsResponse struct { + ThrottleTime time.Duration + Resources []*ResourceResponse +} + +type ResourceResponse struct { + ErrorCode int16 + ErrorMsg string + Type ConfigResourceType + Name string + Configs []*ConfigEntry +} + +type ConfigEntry struct { + Name string + Value string + ReadOnly bool + Default bool + Sensitive bool +} + +func (r *DescribeConfigsResponse) encode(pe packetEncoder) (err error) { + pe.putInt32(int32(r.ThrottleTime / time.Millisecond)) + if err = pe.putArrayLength(len(r.Resources)); err != nil { + return err + } + + for _, c := range r.Resources { + if err = c.encode(pe); err != nil { + return err + } + } + return nil +} + +func (r *DescribeConfigsResponse) decode(pd packetDecoder, version int16) (err error) { + throttleTime, err := pd.getInt32() 
+ if err != nil { + return err + } + r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Resources = make([]*ResourceResponse, n) + for i := 0; i < n; i++ { + rr := &ResourceResponse{} + if err := rr.decode(pd, version); err != nil { + return err + } + r.Resources[i] = rr + } + + return nil +} + +func (r *DescribeConfigsResponse) key() int16 { + return 32 +} + +func (r *DescribeConfigsResponse) version() int16 { + return 0 +} + +func (r *DescribeConfigsResponse) requiredVersion() KafkaVersion { + return V0_11_0_0 +} + +func (r *ResourceResponse) encode(pe packetEncoder) (err error) { + pe.putInt16(r.ErrorCode) + + if err = pe.putString(r.ErrorMsg); err != nil { + return err + } + + pe.putInt8(int8(r.Type)) + + if err = pe.putString(r.Name); err != nil { + return err + } + + if err = pe.putArrayLength(len(r.Configs)); err != nil { + return err + } + + for _, c := range r.Configs { + if err = c.encode(pe); err != nil { + return err + } + } + return nil +} + +func (r *ResourceResponse) decode(pd packetDecoder, version int16) (err error) { + ec, err := pd.getInt16() + if err != nil { + return err + } + r.ErrorCode = ec + + em, err := pd.getString() + if err != nil { + return err + } + r.ErrorMsg = em + + t, err := pd.getInt8() + if err != nil { + return err + } + r.Type = ConfigResourceType(t) + + name, err := pd.getString() + if err != nil { + return err + } + r.Name = name + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Configs = make([]*ConfigEntry, n) + for i := 0; i < n; i++ { + c := &ConfigEntry{} + if err := c.decode(pd, version); err != nil { + return err + } + r.Configs[i] = c + } + return nil +} + +func (r *ConfigEntry) encode(pe packetEncoder) (err error) { + if err = pe.putString(r.Name); err != nil { + return err + } + + if err = pe.putString(r.Value); err != nil { + return err + } + + pe.putBool(r.ReadOnly) + pe.putBool(r.Default) + 
pe.putBool(r.Sensitive) + return nil +} + +func (r *ConfigEntry) decode(pd packetDecoder, version int16) (err error) { + name, err := pd.getString() + if err != nil { + return err + } + r.Name = name + + value, err := pd.getString() + if err != nil { + return err + } + r.Value = value + + read, err := pd.getBool() + if err != nil { + return err + } + r.ReadOnly = read + + de, err := pd.getBool() + if err != nil { + return err + } + r.Default = de + + sensitive, err := pd.getBool() + if err != nil { + return err + } + r.Sensitive = sensitive + return nil +} diff --git a/vendor/github.com/Shopify/sarama/describe_groups_request.go b/vendor/github.com/Shopify/sarama/describe_groups_request.go new file mode 100644 index 00000000000..1fb35677708 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/describe_groups_request.go @@ -0,0 +1,30 @@ +package sarama + +type DescribeGroupsRequest struct { + Groups []string +} + +func (r *DescribeGroupsRequest) encode(pe packetEncoder) error { + return pe.putStringArray(r.Groups) +} + +func (r *DescribeGroupsRequest) decode(pd packetDecoder, version int16) (err error) { + r.Groups, err = pd.getStringArray() + return +} + +func (r *DescribeGroupsRequest) key() int16 { + return 15 +} + +func (r *DescribeGroupsRequest) version() int16 { + return 0 +} + +func (r *DescribeGroupsRequest) requiredVersion() KafkaVersion { + return V0_9_0_0 +} + +func (r *DescribeGroupsRequest) AddGroup(group string) { + r.Groups = append(r.Groups, group) +} diff --git a/vendor/github.com/Shopify/sarama/describe_groups_response.go b/vendor/github.com/Shopify/sarama/describe_groups_response.go new file mode 100644 index 00000000000..542b3a97170 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/describe_groups_response.go @@ -0,0 +1,187 @@ +package sarama + +type DescribeGroupsResponse struct { + Groups []*GroupDescription +} + +func (r *DescribeGroupsResponse) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(r.Groups)); err != nil { + 
return err + } + + for _, groupDescription := range r.Groups { + if err := groupDescription.encode(pe); err != nil { + return err + } + } + + return nil +} + +func (r *DescribeGroupsResponse) decode(pd packetDecoder, version int16) (err error) { + n, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Groups = make([]*GroupDescription, n) + for i := 0; i < n; i++ { + r.Groups[i] = new(GroupDescription) + if err := r.Groups[i].decode(pd); err != nil { + return err + } + } + + return nil +} + +func (r *DescribeGroupsResponse) key() int16 { + return 15 +} + +func (r *DescribeGroupsResponse) version() int16 { + return 0 +} + +func (r *DescribeGroupsResponse) requiredVersion() KafkaVersion { + return V0_9_0_0 +} + +type GroupDescription struct { + Err KError + GroupId string + State string + ProtocolType string + Protocol string + Members map[string]*GroupMemberDescription +} + +func (gd *GroupDescription) encode(pe packetEncoder) error { + pe.putInt16(int16(gd.Err)) + + if err := pe.putString(gd.GroupId); err != nil { + return err + } + if err := pe.putString(gd.State); err != nil { + return err + } + if err := pe.putString(gd.ProtocolType); err != nil { + return err + } + if err := pe.putString(gd.Protocol); err != nil { + return err + } + + if err := pe.putArrayLength(len(gd.Members)); err != nil { + return err + } + + for memberId, groupMemberDescription := range gd.Members { + if err := pe.putString(memberId); err != nil { + return err + } + if err := groupMemberDescription.encode(pe); err != nil { + return err + } + } + + return nil +} + +func (gd *GroupDescription) decode(pd packetDecoder) (err error) { + kerr, err := pd.getInt16() + if err != nil { + return err + } + + gd.Err = KError(kerr) + + if gd.GroupId, err = pd.getString(); err != nil { + return + } + if gd.State, err = pd.getString(); err != nil { + return + } + if gd.ProtocolType, err = pd.getString(); err != nil { + return + } + if gd.Protocol, err = pd.getString(); err != nil { + 
return + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + if n == 0 { + return nil + } + + gd.Members = make(map[string]*GroupMemberDescription) + for i := 0; i < n; i++ { + memberId, err := pd.getString() + if err != nil { + return err + } + + gd.Members[memberId] = new(GroupMemberDescription) + if err := gd.Members[memberId].decode(pd); err != nil { + return err + } + } + + return nil +} + +type GroupMemberDescription struct { + ClientId string + ClientHost string + MemberMetadata []byte + MemberAssignment []byte +} + +func (gmd *GroupMemberDescription) encode(pe packetEncoder) error { + if err := pe.putString(gmd.ClientId); err != nil { + return err + } + if err := pe.putString(gmd.ClientHost); err != nil { + return err + } + if err := pe.putBytes(gmd.MemberMetadata); err != nil { + return err + } + if err := pe.putBytes(gmd.MemberAssignment); err != nil { + return err + } + + return nil +} + +func (gmd *GroupMemberDescription) decode(pd packetDecoder) (err error) { + if gmd.ClientId, err = pd.getString(); err != nil { + return + } + if gmd.ClientHost, err = pd.getString(); err != nil { + return + } + if gmd.MemberMetadata, err = pd.getBytes(); err != nil { + return + } + if gmd.MemberAssignment, err = pd.getBytes(); err != nil { + return + } + + return nil +} + +func (gmd *GroupMemberDescription) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) { + assignment := new(ConsumerGroupMemberAssignment) + err := decode(gmd.MemberAssignment, assignment) + return assignment, err +} + +func (gmd *GroupMemberDescription) GetMemberMetadata() (*ConsumerGroupMemberMetadata, error) { + metadata := new(ConsumerGroupMemberMetadata) + err := decode(gmd.MemberMetadata, metadata) + return metadata, err +} diff --git a/vendor/github.com/Shopify/sarama/encoder_decoder.go b/vendor/github.com/Shopify/sarama/encoder_decoder.go new file mode 100644 index 00000000000..7ce3bc0f6e2 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/encoder_decoder.go 
@@ -0,0 +1,89 @@ +package sarama + +import ( + "fmt" + + "github.com/rcrowley/go-metrics" +) + +// Encoder is the interface that wraps the basic Encode method. +// Anything implementing Encoder can be turned into bytes using Kafka's encoding rules. +type encoder interface { + encode(pe packetEncoder) error +} + +// Encode takes an Encoder and turns it into bytes while potentially recording metrics. +func encode(e encoder, metricRegistry metrics.Registry) ([]byte, error) { + if e == nil { + return nil, nil + } + + var prepEnc prepEncoder + var realEnc realEncoder + + err := e.encode(&prepEnc) + if err != nil { + return nil, err + } + + if prepEnc.length < 0 || prepEnc.length > int(MaxRequestSize) { + return nil, PacketEncodingError{fmt.Sprintf("invalid request size (%d)", prepEnc.length)} + } + + realEnc.raw = make([]byte, prepEnc.length) + realEnc.registry = metricRegistry + err = e.encode(&realEnc) + if err != nil { + return nil, err + } + + return realEnc.raw, nil +} + +// Decoder is the interface that wraps the basic Decode method. +// Anything implementing Decoder can be extracted from bytes using Kafka's encoding rules. +type decoder interface { + decode(pd packetDecoder) error +} + +type versionedDecoder interface { + decode(pd packetDecoder, version int16) error +} + +// Decode takes bytes and a Decoder and fills the fields of the decoder from the bytes, +// interpreted using Kafka's encoding rules. 
+func decode(buf []byte, in decoder) error { + if buf == nil { + return nil + } + + helper := realDecoder{raw: buf} + err := in.decode(&helper) + if err != nil { + return err + } + + if helper.off != len(buf) { + return PacketDecodingError{"invalid length"} + } + + return nil +} + +func versionedDecode(buf []byte, in versionedDecoder, version int16) error { + if buf == nil { + return nil + } + + helper := realDecoder{raw: buf} + err := in.decode(&helper, version) + if err != nil { + return err + } + + if helper.off != len(buf) { + return PacketDecodingError{"invalid length"} + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/end_txn_request.go b/vendor/github.com/Shopify/sarama/end_txn_request.go new file mode 100644 index 00000000000..2cd9b506d3f --- /dev/null +++ b/vendor/github.com/Shopify/sarama/end_txn_request.go @@ -0,0 +1,50 @@ +package sarama + +type EndTxnRequest struct { + TransactionalID string + ProducerID int64 + ProducerEpoch int16 + TransactionResult bool +} + +func (a *EndTxnRequest) encode(pe packetEncoder) error { + if err := pe.putString(a.TransactionalID); err != nil { + return err + } + + pe.putInt64(a.ProducerID) + + pe.putInt16(a.ProducerEpoch) + + pe.putBool(a.TransactionResult) + + return nil +} + +func (a *EndTxnRequest) decode(pd packetDecoder, version int16) (err error) { + if a.TransactionalID, err = pd.getString(); err != nil { + return err + } + if a.ProducerID, err = pd.getInt64(); err != nil { + return err + } + if a.ProducerEpoch, err = pd.getInt16(); err != nil { + return err + } + if a.TransactionResult, err = pd.getBool(); err != nil { + return err + } + return nil +} + +func (a *EndTxnRequest) key() int16 { + return 26 +} + +func (a *EndTxnRequest) version() int16 { + return 0 +} + +func (a *EndTxnRequest) requiredVersion() KafkaVersion { + return V0_11_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/end_txn_response.go b/vendor/github.com/Shopify/sarama/end_txn_response.go new file mode 100644 index 
00000000000..33b27e33d49 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/end_txn_response.go @@ -0,0 +1,44 @@ +package sarama + +import ( + "time" +) + +type EndTxnResponse struct { + ThrottleTime time.Duration + Err KError +} + +func (e *EndTxnResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(e.ThrottleTime / time.Millisecond)) + pe.putInt16(int16(e.Err)) + return nil +} + +func (e *EndTxnResponse) decode(pd packetDecoder, version int16) (err error) { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + e.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + kerr, err := pd.getInt16() + if err != nil { + return err + } + e.Err = KError(kerr) + + return nil +} + +func (e *EndTxnResponse) key() int16 { + return 25 +} + +func (e *EndTxnResponse) version() int16 { + return 0 +} + +func (e *EndTxnResponse) requiredVersion() KafkaVersion { + return V0_11_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/errors.go b/vendor/github.com/Shopify/sarama/errors.go new file mode 100644 index 00000000000..c578ef5fb43 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/errors.go @@ -0,0 +1,281 @@ +package sarama + +import ( + "errors" + "fmt" +) + +// ErrOutOfBrokers is the error returned when the client has run out of brokers to talk to because all of them errored +// or otherwise failed to respond. +var ErrOutOfBrokers = errors.New("kafka: client has run out of available brokers to talk to (Is your cluster reachable?)") + +// ErrClosedClient is the error returned when a method is called on a client that has been closed. +var ErrClosedClient = errors.New("kafka: tried to use a client that was closed") + +// ErrIncompleteResponse is the error returned when the server returns a syntactically valid response, but it does +// not contain the expected information. 
+var ErrIncompleteResponse = errors.New("kafka: response did not contain all the expected topic/partition blocks") + +// ErrInvalidPartition is the error returned when a partitioner returns an invalid partition index +// (meaning one outside of the range [0...numPartitions-1]). +var ErrInvalidPartition = errors.New("kafka: partitioner returned an invalid partition index") + +// ErrAlreadyConnected is the error returned when calling Open() on a Broker that is already connected or connecting. +var ErrAlreadyConnected = errors.New("kafka: broker connection already initiated") + +// ErrNotConnected is the error returned when trying to send or call Close() on a Broker that is not connected. +var ErrNotConnected = errors.New("kafka: broker not connected") + +// ErrInsufficientData is returned when decoding and the packet is truncated. This can be expected +// when requesting messages, since as an optimization the server is allowed to return a partial message at the end +// of the message set. +var ErrInsufficientData = errors.New("kafka: insufficient data to decode packet, more bytes expected") + +// ErrShuttingDown is returned when a producer receives a message during shutdown. +var ErrShuttingDown = errors.New("kafka: message received by producer in process of shutting down") + +// ErrMessageTooLarge is returned when the next message to consume is larger than the configured Consumer.Fetch.Max +var ErrMessageTooLarge = errors.New("kafka: message is larger than Consumer.Fetch.Max") + +// ErrConsumerOffsetNotAdvanced is returned when a partition consumer didn't advance its offset after parsing +// a RecordBatch. +var ErrConsumerOffsetNotAdvanced = errors.New("kafka: consumer offset was not advanced after a RecordBatch") + +// ErrControllerNotAvailable is returned when server didn't give correct controller id. May be kafka server's version +// is lower than 0.10.0.0. 
+var ErrControllerNotAvailable = errors.New("kafka: controller is not available") + +// ErrNoTopicsToUpdateMetadata is returned when Meta.Full is set to false but no specific topics were found to update +// the metadata. +var ErrNoTopicsToUpdateMetadata = errors.New("kafka: no specific topics to update metadata") + +// PacketEncodingError is returned from a failure while encoding a Kafka packet. This can happen, for example, +// if you try to encode a string over 2^15 characters in length, since Kafka's encoding rules do not permit that. +type PacketEncodingError struct { + Info string +} + +func (err PacketEncodingError) Error() string { + return fmt.Sprintf("kafka: error encoding packet: %s", err.Info) +} + +// PacketDecodingError is returned when there was an error (other than truncated data) decoding the Kafka broker's response. +// This can be a bad CRC or length field, or any other invalid value. +type PacketDecodingError struct { + Info string +} + +func (err PacketDecodingError) Error() string { + return fmt.Sprintf("kafka: error decoding packet: %s", err.Info) +} + +// ConfigurationError is the type of error returned from a constructor (e.g. NewClient, or NewConsumer) +// when the specified configuration is invalid. +type ConfigurationError string + +func (err ConfigurationError) Error() string { + return "kafka: invalid configuration (" + string(err) + ")" +} + +// KError is the type of error that can be returned directly by the Kafka broker. +// See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ErrorCodes +type KError int16 + +// Numeric error codes returned by the Kafka server. 
+const ( + ErrNoError KError = 0 + ErrUnknown KError = -1 + ErrOffsetOutOfRange KError = 1 + ErrInvalidMessage KError = 2 + ErrUnknownTopicOrPartition KError = 3 + ErrInvalidMessageSize KError = 4 + ErrLeaderNotAvailable KError = 5 + ErrNotLeaderForPartition KError = 6 + ErrRequestTimedOut KError = 7 + ErrBrokerNotAvailable KError = 8 + ErrReplicaNotAvailable KError = 9 + ErrMessageSizeTooLarge KError = 10 + ErrStaleControllerEpochCode KError = 11 + ErrOffsetMetadataTooLarge KError = 12 + ErrNetworkException KError = 13 + ErrOffsetsLoadInProgress KError = 14 + ErrConsumerCoordinatorNotAvailable KError = 15 + ErrNotCoordinatorForConsumer KError = 16 + ErrInvalidTopic KError = 17 + ErrMessageSetSizeTooLarge KError = 18 + ErrNotEnoughReplicas KError = 19 + ErrNotEnoughReplicasAfterAppend KError = 20 + ErrInvalidRequiredAcks KError = 21 + ErrIllegalGeneration KError = 22 + ErrInconsistentGroupProtocol KError = 23 + ErrInvalidGroupId KError = 24 + ErrUnknownMemberId KError = 25 + ErrInvalidSessionTimeout KError = 26 + ErrRebalanceInProgress KError = 27 + ErrInvalidCommitOffsetSize KError = 28 + ErrTopicAuthorizationFailed KError = 29 + ErrGroupAuthorizationFailed KError = 30 + ErrClusterAuthorizationFailed KError = 31 + ErrInvalidTimestamp KError = 32 + ErrUnsupportedSASLMechanism KError = 33 + ErrIllegalSASLState KError = 34 + ErrUnsupportedVersion KError = 35 + ErrTopicAlreadyExists KError = 36 + ErrInvalidPartitions KError = 37 + ErrInvalidReplicationFactor KError = 38 + ErrInvalidReplicaAssignment KError = 39 + ErrInvalidConfig KError = 40 + ErrNotController KError = 41 + ErrInvalidRequest KError = 42 + ErrUnsupportedForMessageFormat KError = 43 + ErrPolicyViolation KError = 44 + ErrOutOfOrderSequenceNumber KError = 45 + ErrDuplicateSequenceNumber KError = 46 + ErrInvalidProducerEpoch KError = 47 + ErrInvalidTxnState KError = 48 + ErrInvalidProducerIDMapping KError = 49 + ErrInvalidTransactionTimeout KError = 50 + ErrConcurrentTransactions KError = 51 + 
ErrTransactionCoordinatorFenced KError = 52 + ErrTransactionalIDAuthorizationFailed KError = 53 + ErrSecurityDisabled KError = 54 + ErrOperationNotAttempted KError = 55 + ErrKafkaStorageError KError = 56 + ErrLogDirNotFound KError = 57 + ErrSASLAuthenticationFailed KError = 58 + ErrUnknownProducerID KError = 59 + ErrReassignmentInProgress KError = 60 +) + +func (err KError) Error() string { + // Error messages stolen/adapted from + // https://kafka.apache.org/protocol#protocol_error_codes + switch err { + case ErrNoError: + return "kafka server: Not an error, why are you printing me?" + case ErrUnknown: + return "kafka server: Unexpected (unknown?) server error." + case ErrOffsetOutOfRange: + return "kafka server: The requested offset is outside the range of offsets maintained by the server for the given topic/partition." + case ErrInvalidMessage: + return "kafka server: Message contents does not match its CRC." + case ErrUnknownTopicOrPartition: + return "kafka server: Request was for a topic or partition that does not exist on this broker." + case ErrInvalidMessageSize: + return "kafka server: The message has a negative size." + case ErrLeaderNotAvailable: + return "kafka server: In the middle of a leadership election, there is currently no leader for this partition and hence it is unavailable for writes." + case ErrNotLeaderForPartition: + return "kafka server: Tried to send a message to a replica that is not the leader for some partition. Your metadata is out of date." + case ErrRequestTimedOut: + return "kafka server: Request exceeded the user-specified time limit in the request." + case ErrBrokerNotAvailable: + return "kafka server: Broker not available. Not a client facing error, we should never receive this!!!" + case ErrReplicaNotAvailable: + return "kafka server: Replica information not available, one or more brokers are down." + case ErrMessageSizeTooLarge: + return "kafka server: Message was too large, server rejected it to avoid allocation error." 
+ case ErrStaleControllerEpochCode: + return "kafka server: StaleControllerEpochCode (internal error code for broker-to-broker communication)." + case ErrOffsetMetadataTooLarge: + return "kafka server: Specified a string larger than the configured maximum for offset metadata." + case ErrNetworkException: + return "kafka server: The server disconnected before a response was received." + case ErrOffsetsLoadInProgress: + return "kafka server: The broker is still loading offsets after a leader change for that offset's topic partition." + case ErrConsumerCoordinatorNotAvailable: + return "kafka server: Offset's topic has not yet been created." + case ErrNotCoordinatorForConsumer: + return "kafka server: Request was for a consumer group that is not coordinated by this broker." + case ErrInvalidTopic: + return "kafka server: The request attempted to perform an operation on an invalid topic." + case ErrMessageSetSizeTooLarge: + return "kafka server: The request included message batch larger than the configured segment size on the server." + case ErrNotEnoughReplicas: + return "kafka server: Messages are rejected since there are fewer in-sync replicas than required." + case ErrNotEnoughReplicasAfterAppend: + return "kafka server: Messages are written to the log, but to fewer in-sync replicas than required." + case ErrInvalidRequiredAcks: + return "kafka server: The number of required acks is invalid (should be either -1, 0, or 1)." + case ErrIllegalGeneration: + return "kafka server: The provided generation id is not the current generation." + case ErrInconsistentGroupProtocol: + return "kafka server: The provider group protocol type is incompatible with the other members." + case ErrInvalidGroupId: + return "kafka server: The provided group id was empty." + case ErrUnknownMemberId: + return "kafka server: The provided member is not known in the current generation." 
+ case ErrInvalidSessionTimeout: + return "kafka server: The provided session timeout is outside the allowed range." + case ErrRebalanceInProgress: + return "kafka server: A rebalance for the group is in progress. Please re-join the group." + case ErrInvalidCommitOffsetSize: + return "kafka server: The provided commit metadata was too large." + case ErrTopicAuthorizationFailed: + return "kafka server: The client is not authorized to access this topic." + case ErrGroupAuthorizationFailed: + return "kafka server: The client is not authorized to access this group." + case ErrClusterAuthorizationFailed: + return "kafka server: The client is not authorized to send this request type." + case ErrInvalidTimestamp: + return "kafka server: The timestamp of the message is out of acceptable range." + case ErrUnsupportedSASLMechanism: + return "kafka server: The broker does not support the requested SASL mechanism." + case ErrIllegalSASLState: + return "kafka server: Request is not valid given the current SASL state." + case ErrUnsupportedVersion: + return "kafka server: The version of API is not supported." + case ErrTopicAlreadyExists: + return "kafka server: Topic with this name already exists." + case ErrInvalidPartitions: + return "kafka server: Number of partitions is invalid." + case ErrInvalidReplicationFactor: + return "kafka server: Replication-factor is invalid." + case ErrInvalidReplicaAssignment: + return "kafka server: Replica assignment is invalid." + case ErrInvalidConfig: + return "kafka server: Configuration is invalid." + case ErrNotController: + return "kafka server: This is not the correct controller for this cluster." + case ErrInvalidRequest: + return "kafka server: This most likely occurs because of a request being malformed by the client library or the message was sent to an incompatible broker. See the broker logs for more details." 
+ case ErrUnsupportedForMessageFormat: + return "kafka server: The requested operation is not supported by the message format version." + case ErrPolicyViolation: + return "kafka server: Request parameters do not satisfy the configured policy." + case ErrOutOfOrderSequenceNumber: + return "kafka server: The broker received an out of order sequence number." + case ErrDuplicateSequenceNumber: + return "kafka server: The broker received a duplicate sequence number." + case ErrInvalidProducerEpoch: + return "kafka server: Producer attempted an operation with an old epoch." + case ErrInvalidTxnState: + return "kafka server: The producer attempted a transactional operation in an invalid state." + case ErrInvalidProducerIDMapping: + return "kafka server: The producer attempted to use a producer id which is not currently assigned to its transactional id." + case ErrInvalidTransactionTimeout: + return "kafka server: The transaction timeout is larger than the maximum value allowed by the broker (as configured by max.transaction.timeout.ms)." + case ErrConcurrentTransactions: + return "kafka server: The producer attempted to update a transaction while another concurrent operation on the same transaction was ongoing." + case ErrTransactionCoordinatorFenced: + return "kafka server: The transaction coordinator sending a WriteTxnMarker is no longer the current coordinator for a given producer." + case ErrTransactionalIDAuthorizationFailed: + return "kafka server: Transactional ID authorization failed." + case ErrSecurityDisabled: + return "kafka server: Security features are disabled." + case ErrOperationNotAttempted: + return "kafka server: The broker did not attempt to execute this operation." + case ErrKafkaStorageError: + return "kafka server: Disk error when trying to access log file on the disk." + case ErrLogDirNotFound: + return "kafka server: The specified log directory is not found in the broker config." 
+ case ErrSASLAuthenticationFailed: + return "kafka server: SASL Authentication failed." + case ErrUnknownProducerID: + return "kafka server: The broker could not locate the producer metadata associated with the Producer ID." + case ErrReassignmentInProgress: + return "kafka server: A partition reassignment is in progress." + } + + return fmt.Sprintf("Unknown error, how did this happen? Error code = %d", err) +} diff --git a/vendor/github.com/Shopify/sarama/fetch_request.go b/vendor/github.com/Shopify/sarama/fetch_request.go new file mode 100644 index 00000000000..462ab8afbb8 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/fetch_request.go @@ -0,0 +1,170 @@ +package sarama + +type fetchRequestBlock struct { + fetchOffset int64 + maxBytes int32 +} + +func (b *fetchRequestBlock) encode(pe packetEncoder) error { + pe.putInt64(b.fetchOffset) + pe.putInt32(b.maxBytes) + return nil +} + +func (b *fetchRequestBlock) decode(pd packetDecoder) (err error) { + if b.fetchOffset, err = pd.getInt64(); err != nil { + return err + } + if b.maxBytes, err = pd.getInt32(); err != nil { + return err + } + return nil +} + +// FetchRequest (API key 1) will fetch Kafka messages. Version 3 introduced the MaxBytes field. See +// https://issues.apache.org/jira/browse/KAFKA-2063 for a discussion of the issues leading up to that. 
The KIP is at +// https://cwiki.apache.org/confluence/display/KAFKA/KIP-74%3A+Add+Fetch+Response+Size+Limit+in+Bytes +type FetchRequest struct { + MaxWaitTime int32 + MinBytes int32 + MaxBytes int32 + Version int16 + Isolation IsolationLevel + blocks map[string]map[int32]*fetchRequestBlock +} + +type IsolationLevel int8 + +const ( + ReadUncommitted IsolationLevel = 0 + ReadCommitted IsolationLevel = 1 +) + +func (r *FetchRequest) encode(pe packetEncoder) (err error) { + pe.putInt32(-1) // replica ID is always -1 for clients + pe.putInt32(r.MaxWaitTime) + pe.putInt32(r.MinBytes) + if r.Version >= 3 { + pe.putInt32(r.MaxBytes) + } + if r.Version >= 4 { + pe.putInt8(int8(r.Isolation)) + } + err = pe.putArrayLength(len(r.blocks)) + if err != nil { + return err + } + for topic, blocks := range r.blocks { + err = pe.putString(topic) + if err != nil { + return err + } + err = pe.putArrayLength(len(blocks)) + if err != nil { + return err + } + for partition, block := range blocks { + pe.putInt32(partition) + err = block.encode(pe) + if err != nil { + return err + } + } + } + return nil +} + +func (r *FetchRequest) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + if _, err = pd.getInt32(); err != nil { + return err + } + if r.MaxWaitTime, err = pd.getInt32(); err != nil { + return err + } + if r.MinBytes, err = pd.getInt32(); err != nil { + return err + } + if r.Version >= 3 { + if r.MaxBytes, err = pd.getInt32(); err != nil { + return err + } + } + if r.Version >= 4 { + isolation, err := pd.getInt8() + if err != nil { + return err + } + r.Isolation = IsolationLevel(isolation) + } + topicCount, err := pd.getArrayLength() + if err != nil { + return err + } + if topicCount == 0 { + return nil + } + r.blocks = make(map[string]map[int32]*fetchRequestBlock) + for i := 0; i < topicCount; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + partitionCount, err := pd.getArrayLength() + if err != nil { + return err + } + 
r.blocks[topic] = make(map[int32]*fetchRequestBlock) + for j := 0; j < partitionCount; j++ { + partition, err := pd.getInt32() + if err != nil { + return err + } + fetchBlock := &fetchRequestBlock{} + if err = fetchBlock.decode(pd); err != nil { + return err + } + r.blocks[topic][partition] = fetchBlock + } + } + return nil +} + +func (r *FetchRequest) key() int16 { + return 1 +} + +func (r *FetchRequest) version() int16 { + return r.Version +} + +func (r *FetchRequest) requiredVersion() KafkaVersion { + switch r.Version { + case 1: + return V0_9_0_0 + case 2: + return V0_10_0_0 + case 3: + return V0_10_1_0 + case 4: + return V0_11_0_0 + default: + return MinVersion + } +} + +func (r *FetchRequest) AddBlock(topic string, partitionID int32, fetchOffset int64, maxBytes int32) { + if r.blocks == nil { + r.blocks = make(map[string]map[int32]*fetchRequestBlock) + } + + if r.blocks[topic] == nil { + r.blocks[topic] = make(map[int32]*fetchRequestBlock) + } + + tmp := new(fetchRequestBlock) + tmp.maxBytes = maxBytes + tmp.fetchOffset = fetchOffset + + r.blocks[topic][partitionID] = tmp +} diff --git a/vendor/github.com/Shopify/sarama/find_coordinator_request.go b/vendor/github.com/Shopify/sarama/find_coordinator_request.go new file mode 100644 index 00000000000..0ab5cb5ff57 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/find_coordinator_request.go @@ -0,0 +1,61 @@ +package sarama + +type CoordinatorType int8 + +const ( + CoordinatorGroup CoordinatorType = 0 + CoordinatorTransaction CoordinatorType = 1 +) + +type FindCoordinatorRequest struct { + Version int16 + CoordinatorKey string + CoordinatorType CoordinatorType +} + +func (f *FindCoordinatorRequest) encode(pe packetEncoder) error { + if err := pe.putString(f.CoordinatorKey); err != nil { + return err + } + + if f.Version >= 1 { + pe.putInt8(int8(f.CoordinatorType)) + } + + return nil +} + +func (f *FindCoordinatorRequest) decode(pd packetDecoder, version int16) (err error) { + if f.CoordinatorKey, err = 
pd.getString(); err != nil { + return err + } + + if version >= 1 { + f.Version = version + coordinatorType, err := pd.getInt8() + if err != nil { + return err + } + + f.CoordinatorType = CoordinatorType(coordinatorType) + } + + return nil +} + +func (f *FindCoordinatorRequest) key() int16 { + return 10 +} + +func (f *FindCoordinatorRequest) version() int16 { + return f.Version +} + +func (f *FindCoordinatorRequest) requiredVersion() KafkaVersion { + switch f.Version { + case 1: + return V0_11_0_0 + default: + return V0_8_2_0 + } +} diff --git a/vendor/github.com/Shopify/sarama/find_coordinator_response.go b/vendor/github.com/Shopify/sarama/find_coordinator_response.go new file mode 100644 index 00000000000..9c900e8b774 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/find_coordinator_response.go @@ -0,0 +1,92 @@ +package sarama + +import ( + "time" +) + +var NoNode = &Broker{id: -1, addr: ":-1"} + +type FindCoordinatorResponse struct { + Version int16 + ThrottleTime time.Duration + Err KError + ErrMsg *string + Coordinator *Broker +} + +func (f *FindCoordinatorResponse) decode(pd packetDecoder, version int16) (err error) { + if version >= 1 { + f.Version = version + + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + f.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + } + + tmp, err := pd.getInt16() + if err != nil { + return err + } + f.Err = KError(tmp) + + if version >= 1 { + if f.ErrMsg, err = pd.getNullableString(); err != nil { + return err + } + } + + coordinator := new(Broker) + // The version is hardcoded to 0, as version 1 of the Broker-decode + // contains the rack-field which is not present in the FindCoordinatorResponse. 
+ if err := coordinator.decode(pd, 0); err != nil { + return err + } + if coordinator.addr == ":0" { + return nil + } + f.Coordinator = coordinator + + return nil +} + +func (f *FindCoordinatorResponse) encode(pe packetEncoder) error { + if f.Version >= 1 { + pe.putInt32(int32(f.ThrottleTime / time.Millisecond)) + } + + pe.putInt16(int16(f.Err)) + + if f.Version >= 1 { + if err := pe.putNullableString(f.ErrMsg); err != nil { + return err + } + } + + coordinator := f.Coordinator + if coordinator == nil { + coordinator = NoNode + } + if err := coordinator.encode(pe, 0); err != nil { + return err + } + return nil +} + +func (f *FindCoordinatorResponse) key() int16 { + return 10 +} + +func (f *FindCoordinatorResponse) version() int16 { + return f.Version +} + +func (f *FindCoordinatorResponse) requiredVersion() KafkaVersion { + switch f.Version { + case 1: + return V0_11_0_0 + default: + return V0_8_2_0 + } +} diff --git a/vendor/github.com/Shopify/sarama/heartbeat_request.go b/vendor/github.com/Shopify/sarama/heartbeat_request.go new file mode 100644 index 00000000000..ce49c473972 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/heartbeat_request.go @@ -0,0 +1,47 @@ +package sarama + +type HeartbeatRequest struct { + GroupId string + GenerationId int32 + MemberId string +} + +func (r *HeartbeatRequest) encode(pe packetEncoder) error { + if err := pe.putString(r.GroupId); err != nil { + return err + } + + pe.putInt32(r.GenerationId) + + if err := pe.putString(r.MemberId); err != nil { + return err + } + + return nil +} + +func (r *HeartbeatRequest) decode(pd packetDecoder, version int16) (err error) { + if r.GroupId, err = pd.getString(); err != nil { + return + } + if r.GenerationId, err = pd.getInt32(); err != nil { + return + } + if r.MemberId, err = pd.getString(); err != nil { + return + } + + return nil +} + +func (r *HeartbeatRequest) key() int16 { + return 12 +} + +func (r *HeartbeatRequest) version() int16 { + return 0 +} + +func (r *HeartbeatRequest) 
requiredVersion() KafkaVersion { + return V0_9_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/heartbeat_response.go b/vendor/github.com/Shopify/sarama/heartbeat_response.go new file mode 100644 index 00000000000..766f5fdec6f --- /dev/null +++ b/vendor/github.com/Shopify/sarama/heartbeat_response.go @@ -0,0 +1,32 @@ +package sarama + +type HeartbeatResponse struct { + Err KError +} + +func (r *HeartbeatResponse) encode(pe packetEncoder) error { + pe.putInt16(int16(r.Err)) + return nil +} + +func (r *HeartbeatResponse) decode(pd packetDecoder, version int16) error { + kerr, err := pd.getInt16() + if err != nil { + return err + } + r.Err = KError(kerr) + + return nil +} + +func (r *HeartbeatResponse) key() int16 { + return 12 +} + +func (r *HeartbeatResponse) version() int16 { + return 0 +} + +func (r *HeartbeatResponse) requiredVersion() KafkaVersion { + return V0_9_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/init_producer_id_request.go b/vendor/github.com/Shopify/sarama/init_producer_id_request.go new file mode 100644 index 00000000000..8ceb6c23255 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/init_producer_id_request.go @@ -0,0 +1,43 @@ +package sarama + +import "time" + +type InitProducerIDRequest struct { + TransactionalID *string + TransactionTimeout time.Duration +} + +func (i *InitProducerIDRequest) encode(pe packetEncoder) error { + if err := pe.putNullableString(i.TransactionalID); err != nil { + return err + } + pe.putInt32(int32(i.TransactionTimeout / time.Millisecond)) + + return nil +} + +func (i *InitProducerIDRequest) decode(pd packetDecoder, version int16) (err error) { + if i.TransactionalID, err = pd.getNullableString(); err != nil { + return err + } + + timeout, err := pd.getInt32() + if err != nil { + return err + } + i.TransactionTimeout = time.Duration(timeout) * time.Millisecond + + return nil +} + +func (i *InitProducerIDRequest) key() int16 { + return 22 +} + +func (i *InitProducerIDRequest) version() int16 { + return 0 
+} + +func (i *InitProducerIDRequest) requiredVersion() KafkaVersion { + return V0_11_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/init_producer_id_response.go b/vendor/github.com/Shopify/sarama/init_producer_id_response.go new file mode 100644 index 00000000000..1b32eb085b2 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/init_producer_id_response.go @@ -0,0 +1,55 @@ +package sarama + +import "time" + +type InitProducerIDResponse struct { + ThrottleTime time.Duration + Err KError + ProducerID int64 + ProducerEpoch int16 +} + +func (i *InitProducerIDResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(i.ThrottleTime / time.Millisecond)) + pe.putInt16(int16(i.Err)) + pe.putInt64(i.ProducerID) + pe.putInt16(i.ProducerEpoch) + + return nil +} + +func (i *InitProducerIDResponse) decode(pd packetDecoder, version int16) (err error) { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + i.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + kerr, err := pd.getInt16() + if err != nil { + return err + } + i.Err = KError(kerr) + + if i.ProducerID, err = pd.getInt64(); err != nil { + return err + } + + if i.ProducerEpoch, err = pd.getInt16(); err != nil { + return err + } + + return nil +} + +func (i *InitProducerIDResponse) key() int16 { + return 22 +} + +func (i *InitProducerIDResponse) version() int16 { + return 0 +} + +func (i *InitProducerIDResponse) requiredVersion() KafkaVersion { + return V0_11_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/join_group_request.go b/vendor/github.com/Shopify/sarama/join_group_request.go new file mode 100644 index 00000000000..97e9299ea1a --- /dev/null +++ b/vendor/github.com/Shopify/sarama/join_group_request.go @@ -0,0 +1,163 @@ +package sarama + +type GroupProtocol struct { + Name string + Metadata []byte +} + +func (p *GroupProtocol) decode(pd packetDecoder) (err error) { + p.Name, err = pd.getString() + if err != nil { + return err + } + p.Metadata, err = pd.getBytes() + 
return err +} + +func (p *GroupProtocol) encode(pe packetEncoder) (err error) { + if err := pe.putString(p.Name); err != nil { + return err + } + if err := pe.putBytes(p.Metadata); err != nil { + return err + } + return nil +} + +type JoinGroupRequest struct { + Version int16 + GroupId string + SessionTimeout int32 + RebalanceTimeout int32 + MemberId string + ProtocolType string + GroupProtocols map[string][]byte // deprecated; use OrderedGroupProtocols + OrderedGroupProtocols []*GroupProtocol +} + +func (r *JoinGroupRequest) encode(pe packetEncoder) error { + if err := pe.putString(r.GroupId); err != nil { + return err + } + pe.putInt32(r.SessionTimeout) + if r.Version >= 1 { + pe.putInt32(r.RebalanceTimeout) + } + if err := pe.putString(r.MemberId); err != nil { + return err + } + if err := pe.putString(r.ProtocolType); err != nil { + return err + } + + if len(r.GroupProtocols) > 0 { + if len(r.OrderedGroupProtocols) > 0 { + return PacketDecodingError{"cannot specify both GroupProtocols and OrderedGroupProtocols on JoinGroupRequest"} + } + + if err := pe.putArrayLength(len(r.GroupProtocols)); err != nil { + return err + } + for name, metadata := range r.GroupProtocols { + if err := pe.putString(name); err != nil { + return err + } + if err := pe.putBytes(metadata); err != nil { + return err + } + } + } else { + if err := pe.putArrayLength(len(r.OrderedGroupProtocols)); err != nil { + return err + } + for _, protocol := range r.OrderedGroupProtocols { + if err := protocol.encode(pe); err != nil { + return err + } + } + } + + return nil +} + +func (r *JoinGroupRequest) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + + if r.GroupId, err = pd.getString(); err != nil { + return + } + + if r.SessionTimeout, err = pd.getInt32(); err != nil { + return + } + + if version >= 1 { + if r.RebalanceTimeout, err = pd.getInt32(); err != nil { + return err + } + } + + if r.MemberId, err = pd.getString(); err != nil { + return + } + + if 
r.ProtocolType, err = pd.getString(); err != nil { + return + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + if n == 0 { + return nil + } + + r.GroupProtocols = make(map[string][]byte) + for i := 0; i < n; i++ { + protocol := &GroupProtocol{} + if err := protocol.decode(pd); err != nil { + return err + } + r.GroupProtocols[protocol.Name] = protocol.Metadata + r.OrderedGroupProtocols = append(r.OrderedGroupProtocols, protocol) + } + + return nil +} + +func (r *JoinGroupRequest) key() int16 { + return 11 +} + +func (r *JoinGroupRequest) version() int16 { + return r.Version +} + +func (r *JoinGroupRequest) requiredVersion() KafkaVersion { + switch r.Version { + case 2: + return V0_11_0_0 + case 1: + return V0_10_1_0 + default: + return V0_9_0_0 + } +} + +func (r *JoinGroupRequest) AddGroupProtocol(name string, metadata []byte) { + r.OrderedGroupProtocols = append(r.OrderedGroupProtocols, &GroupProtocol{ + Name: name, + Metadata: metadata, + }) +} + +func (r *JoinGroupRequest) AddGroupProtocolMetadata(name string, metadata *ConsumerGroupMemberMetadata) error { + bin, err := encode(metadata, nil) + if err != nil { + return err + } + + r.AddGroupProtocol(name, bin) + return nil +} diff --git a/vendor/github.com/Shopify/sarama/join_group_response.go b/vendor/github.com/Shopify/sarama/join_group_response.go new file mode 100644 index 00000000000..5752acc8aeb --- /dev/null +++ b/vendor/github.com/Shopify/sarama/join_group_response.go @@ -0,0 +1,135 @@ +package sarama + +type JoinGroupResponse struct { + Version int16 + ThrottleTime int32 + Err KError + GenerationId int32 + GroupProtocol string + LeaderId string + MemberId string + Members map[string][]byte +} + +func (r *JoinGroupResponse) GetMembers() (map[string]ConsumerGroupMemberMetadata, error) { + members := make(map[string]ConsumerGroupMemberMetadata, len(r.Members)) + for id, bin := range r.Members { + meta := new(ConsumerGroupMemberMetadata) + if err := decode(bin, meta); err != nil { + 
return nil, err + } + members[id] = *meta + } + return members, nil +} + +func (r *JoinGroupResponse) encode(pe packetEncoder) error { + if r.Version >= 2 { + pe.putInt32(r.ThrottleTime) + } + pe.putInt16(int16(r.Err)) + pe.putInt32(r.GenerationId) + + if err := pe.putString(r.GroupProtocol); err != nil { + return err + } + if err := pe.putString(r.LeaderId); err != nil { + return err + } + if err := pe.putString(r.MemberId); err != nil { + return err + } + + if err := pe.putArrayLength(len(r.Members)); err != nil { + return err + } + + for memberId, memberMetadata := range r.Members { + if err := pe.putString(memberId); err != nil { + return err + } + + if err := pe.putBytes(memberMetadata); err != nil { + return err + } + } + + return nil +} + +func (r *JoinGroupResponse) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + + if version >= 2 { + if r.ThrottleTime, err = pd.getInt32(); err != nil { + return + } + } + + kerr, err := pd.getInt16() + if err != nil { + return err + } + + r.Err = KError(kerr) + + if r.GenerationId, err = pd.getInt32(); err != nil { + return + } + + if r.GroupProtocol, err = pd.getString(); err != nil { + return + } + + if r.LeaderId, err = pd.getString(); err != nil { + return + } + + if r.MemberId, err = pd.getString(); err != nil { + return + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + if n == 0 { + return nil + } + + r.Members = make(map[string][]byte) + for i := 0; i < n; i++ { + memberId, err := pd.getString() + if err != nil { + return err + } + + memberMetadata, err := pd.getBytes() + if err != nil { + return err + } + + r.Members[memberId] = memberMetadata + } + + return nil +} + +func (r *JoinGroupResponse) key() int16 { + return 11 +} + +func (r *JoinGroupResponse) version() int16 { + return r.Version +} + +func (r *JoinGroupResponse) requiredVersion() KafkaVersion { + switch r.Version { + case 2: + return V0_11_0_0 + case 1: + return V0_10_1_0 + default: + return V0_9_0_0 
+ } +} diff --git a/vendor/github.com/Shopify/sarama/leave_group_request.go b/vendor/github.com/Shopify/sarama/leave_group_request.go new file mode 100644 index 00000000000..e177427482f --- /dev/null +++ b/vendor/github.com/Shopify/sarama/leave_group_request.go @@ -0,0 +1,40 @@ +package sarama + +type LeaveGroupRequest struct { + GroupId string + MemberId string +} + +func (r *LeaveGroupRequest) encode(pe packetEncoder) error { + if err := pe.putString(r.GroupId); err != nil { + return err + } + if err := pe.putString(r.MemberId); err != nil { + return err + } + + return nil +} + +func (r *LeaveGroupRequest) decode(pd packetDecoder, version int16) (err error) { + if r.GroupId, err = pd.getString(); err != nil { + return + } + if r.MemberId, err = pd.getString(); err != nil { + return + } + + return nil +} + +func (r *LeaveGroupRequest) key() int16 { + return 13 +} + +func (r *LeaveGroupRequest) version() int16 { + return 0 +} + +func (r *LeaveGroupRequest) requiredVersion() KafkaVersion { + return V0_9_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/leave_group_response.go b/vendor/github.com/Shopify/sarama/leave_group_response.go new file mode 100644 index 00000000000..d60c626da01 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/leave_group_response.go @@ -0,0 +1,32 @@ +package sarama + +type LeaveGroupResponse struct { + Err KError +} + +func (r *LeaveGroupResponse) encode(pe packetEncoder) error { + pe.putInt16(int16(r.Err)) + return nil +} + +func (r *LeaveGroupResponse) decode(pd packetDecoder, version int16) (err error) { + kerr, err := pd.getInt16() + if err != nil { + return err + } + r.Err = KError(kerr) + + return nil +} + +func (r *LeaveGroupResponse) key() int16 { + return 13 +} + +func (r *LeaveGroupResponse) version() int16 { + return 0 +} + +func (r *LeaveGroupResponse) requiredVersion() KafkaVersion { + return V0_9_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/list_groups_request.go 
b/vendor/github.com/Shopify/sarama/list_groups_request.go new file mode 100644 index 00000000000..3b16abf7fa8 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/list_groups_request.go @@ -0,0 +1,24 @@ +package sarama + +type ListGroupsRequest struct { +} + +func (r *ListGroupsRequest) encode(pe packetEncoder) error { + return nil +} + +func (r *ListGroupsRequest) decode(pd packetDecoder, version int16) (err error) { + return nil +} + +func (r *ListGroupsRequest) key() int16 { + return 16 +} + +func (r *ListGroupsRequest) version() int16 { + return 0 +} + +func (r *ListGroupsRequest) requiredVersion() KafkaVersion { + return V0_9_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/list_groups_response.go b/vendor/github.com/Shopify/sarama/list_groups_response.go new file mode 100644 index 00000000000..56115d4c75a --- /dev/null +++ b/vendor/github.com/Shopify/sarama/list_groups_response.go @@ -0,0 +1,69 @@ +package sarama + +type ListGroupsResponse struct { + Err KError + Groups map[string]string +} + +func (r *ListGroupsResponse) encode(pe packetEncoder) error { + pe.putInt16(int16(r.Err)) + + if err := pe.putArrayLength(len(r.Groups)); err != nil { + return err + } + for groupId, protocolType := range r.Groups { + if err := pe.putString(groupId); err != nil { + return err + } + if err := pe.putString(protocolType); err != nil { + return err + } + } + + return nil +} + +func (r *ListGroupsResponse) decode(pd packetDecoder, version int16) error { + kerr, err := pd.getInt16() + if err != nil { + return err + } + + r.Err = KError(kerr) + + n, err := pd.getArrayLength() + if err != nil { + return err + } + if n == 0 { + return nil + } + + r.Groups = make(map[string]string) + for i := 0; i < n; i++ { + groupId, err := pd.getString() + if err != nil { + return err + } + protocolType, err := pd.getString() + if err != nil { + return err + } + + r.Groups[groupId] = protocolType + } + + return nil +} + +func (r *ListGroupsResponse) key() int16 { + return 16 +} + +func (r 
*ListGroupsResponse) version() int16 { + return 0 +} + +func (r *ListGroupsResponse) requiredVersion() KafkaVersion { + return V0_9_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/message.go b/vendor/github.com/Shopify/sarama/message.go new file mode 100644 index 00000000000..fecdbfdef75 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/message.go @@ -0,0 +1,223 @@ +package sarama + +import ( + "bytes" + "compress/gzip" + "fmt" + "io/ioutil" + "time" + + "github.com/eapache/go-xerial-snappy" + "github.com/pierrec/lz4" +) + +// CompressionCodec represents the various compression codecs recognized by Kafka in messages. +type CompressionCodec int8 + +// only the last two bits are really used +const compressionCodecMask int8 = 0x03 + +const ( + CompressionNone CompressionCodec = 0 + CompressionGZIP CompressionCodec = 1 + CompressionSnappy CompressionCodec = 2 + CompressionLZ4 CompressionCodec = 3 +) + +func (cc CompressionCodec) String() string { + return []string{ + "none", + "gzip", + "snappy", + "lz4", + }[int(cc)] +} + +// CompressionLevelDefault is the constant to use in CompressionLevel +// to have the default compression level for any codec. The value is picked +// that we don't use any existing compression levels. 
+const CompressionLevelDefault = -1000 + +type Message struct { + Codec CompressionCodec // codec used to compress the message contents + CompressionLevel int // compression level + Key []byte // the message key, may be nil + Value []byte // the message contents + Set *MessageSet // the message set a message might wrap + Version int8 // v1 requires Kafka 0.10 + Timestamp time.Time // the timestamp of the message (version 1+ only) + + compressedCache []byte + compressedSize int // used for computing the compression ratio metrics +} + +func (m *Message) encode(pe packetEncoder) error { + pe.push(newCRC32Field(crcIEEE)) + + pe.putInt8(m.Version) + + attributes := int8(m.Codec) & compressionCodecMask + pe.putInt8(attributes) + + if m.Version >= 1 { + if err := (Timestamp{&m.Timestamp}).encode(pe); err != nil { + return err + } + } + + err := pe.putBytes(m.Key) + if err != nil { + return err + } + + var payload []byte + + if m.compressedCache != nil { + payload = m.compressedCache + m.compressedCache = nil + } else if m.Value != nil { + switch m.Codec { + case CompressionNone: + payload = m.Value + case CompressionGZIP: + var buf bytes.Buffer + var writer *gzip.Writer + if m.CompressionLevel != CompressionLevelDefault { + writer, err = gzip.NewWriterLevel(&buf, m.CompressionLevel) + if err != nil { + return err + } + } else { + writer = gzip.NewWriter(&buf) + } + if _, err = writer.Write(m.Value); err != nil { + return err + } + if err = writer.Close(); err != nil { + return err + } + m.compressedCache = buf.Bytes() + payload = m.compressedCache + case CompressionSnappy: + tmp := snappy.Encode(m.Value) + m.compressedCache = tmp + payload = m.compressedCache + case CompressionLZ4: + var buf bytes.Buffer + writer := lz4.NewWriter(&buf) + if _, err = writer.Write(m.Value); err != nil { + return err + } + if err = writer.Close(); err != nil { + return err + } + m.compressedCache = buf.Bytes() + payload = m.compressedCache + + default: + return 
PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", m.Codec)} + } + // Keep in mind the compressed payload size for metric gathering + m.compressedSize = len(payload) + } + + if err = pe.putBytes(payload); err != nil { + return err + } + + return pe.pop() +} + +func (m *Message) decode(pd packetDecoder) (err error) { + err = pd.push(newCRC32Field(crcIEEE)) + if err != nil { + return err + } + + m.Version, err = pd.getInt8() + if err != nil { + return err + } + + if m.Version > 1 { + return PacketDecodingError{fmt.Sprintf("unknown magic byte (%v)", m.Version)} + } + + attribute, err := pd.getInt8() + if err != nil { + return err + } + m.Codec = CompressionCodec(attribute & compressionCodecMask) + + if m.Version == 1 { + if err := (Timestamp{&m.Timestamp}).decode(pd); err != nil { + return err + } + } + + m.Key, err = pd.getBytes() + if err != nil { + return err + } + + m.Value, err = pd.getBytes() + if err != nil { + return err + } + + // Required for deep equal assertion during tests but might be useful + // for future metrics about the compression ratio in fetch requests + m.compressedSize = len(m.Value) + + switch m.Codec { + case CompressionNone: + // nothing to do + case CompressionGZIP: + if m.Value == nil { + break + } + reader, err := gzip.NewReader(bytes.NewReader(m.Value)) + if err != nil { + return err + } + if m.Value, err = ioutil.ReadAll(reader); err != nil { + return err + } + if err := m.decodeSet(); err != nil { + return err + } + case CompressionSnappy: + if m.Value == nil { + break + } + if m.Value, err = snappy.Decode(m.Value); err != nil { + return err + } + if err := m.decodeSet(); err != nil { + return err + } + case CompressionLZ4: + if m.Value == nil { + break + } + reader := lz4.NewReader(bytes.NewReader(m.Value)) + if m.Value, err = ioutil.ReadAll(reader); err != nil { + return err + } + if err := m.decodeSet(); err != nil { + return err + } + + default: + return PacketDecodingError{fmt.Sprintf("invalid compression 
specified (%d)", m.Codec)} + } + + return pd.pop() +} + +// decodes a message set from a previousy encoded bulk-message +func (m *Message) decodeSet() (err error) { + pd := realDecoder{raw: m.Value} + m.Set = &MessageSet{} + return m.Set.decode(&pd) +} diff --git a/vendor/github.com/Shopify/sarama/metrics.go b/vendor/github.com/Shopify/sarama/metrics.go new file mode 100644 index 00000000000..4869708e944 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/metrics.go @@ -0,0 +1,51 @@ +package sarama + +import ( + "fmt" + "strings" + + "github.com/rcrowley/go-metrics" +) + +// Use exponentially decaying reservoir for sampling histograms with the same defaults as the Java library: +// 1028 elements, which offers a 99.9% confidence level with a 5% margin of error assuming a normal distribution, +// and an alpha factor of 0.015, which heavily biases the reservoir to the past 5 minutes of measurements. +// See https://github.com/dropwizard/metrics/blob/v3.1.0/metrics-core/src/main/java/com/codahale/metrics/ExponentiallyDecayingReservoir.java#L38 +const ( + metricsReservoirSize = 1028 + metricsAlphaFactor = 0.015 +) + +func getOrRegisterHistogram(name string, r metrics.Registry) metrics.Histogram { + return r.GetOrRegister(name, func() metrics.Histogram { + return metrics.NewHistogram(metrics.NewExpDecaySample(metricsReservoirSize, metricsAlphaFactor)) + }).(metrics.Histogram) +} + +func getMetricNameForBroker(name string, broker *Broker) string { + // Use broker id like the Java client as it does not contain '.' or ':' characters that + // can be interpreted as special character by monitoring tool (e.g. 
Graphite) + return fmt.Sprintf(name+"-for-broker-%d", broker.ID()) +} + +func getOrRegisterBrokerMeter(name string, broker *Broker, r metrics.Registry) metrics.Meter { + return metrics.GetOrRegisterMeter(getMetricNameForBroker(name, broker), r) +} + +func getOrRegisterBrokerHistogram(name string, broker *Broker, r metrics.Registry) metrics.Histogram { + return getOrRegisterHistogram(getMetricNameForBroker(name, broker), r) +} + +func getMetricNameForTopic(name string, topic string) string { + // Convert dot to _ since reporters like Graphite typically use dot to represent hierarchy + // cf. KAFKA-1902 and KAFKA-2337 + return fmt.Sprintf(name+"-for-topic-%s", strings.Replace(topic, ".", "_", -1)) +} + +func getOrRegisterTopicMeter(name string, topic string, r metrics.Registry) metrics.Meter { + return metrics.GetOrRegisterMeter(getMetricNameForTopic(name, topic), r) +} + +func getOrRegisterTopicHistogram(name string, topic string, r metrics.Registry) metrics.Histogram { + return getOrRegisterHistogram(getMetricNameForTopic(name, topic), r) +} diff --git a/vendor/github.com/Shopify/sarama/mockbroker.go b/vendor/github.com/Shopify/sarama/mockbroker.go new file mode 100644 index 00000000000..55ef1e2920f --- /dev/null +++ b/vendor/github.com/Shopify/sarama/mockbroker.go @@ -0,0 +1,330 @@ +package sarama + +import ( + "bytes" + "encoding/binary" + "fmt" + "io" + "net" + "reflect" + "strconv" + "sync" + "time" + + "github.com/davecgh/go-spew/spew" +) + +const ( + expectationTimeout = 500 * time.Millisecond +) + +type requestHandlerFunc func(req *request) (res encoder) + +// RequestNotifierFunc is invoked when a mock broker processes a request successfully +// and will provides the number of bytes read and written. +type RequestNotifierFunc func(bytesRead, bytesWritten int) + +// MockBroker is a mock Kafka broker that is used in unit tests. It is exposed +// to facilitate testing of higher level or specialized consumers and producers +// built on top of Sarama. 
Note that it does not 'mimic' the Kafka API protocol, +// but rather provides a facility to do that. It takes care of the TCP +// transport, request unmarshaling, response marshaling, and makes it the test +// writer responsibility to program correct according to the Kafka API protocol +// MockBroker behaviour. +// +// MockBroker is implemented as a TCP server listening on a kernel-selected +// localhost port that can accept many connections. It reads Kafka requests +// from that connection and returns responses programmed by the SetHandlerByMap +// function. If a MockBroker receives a request that it has no programmed +// response for, then it returns nothing and the request times out. +// +// A set of MockRequest builders to define mappings used by MockBroker is +// provided by Sarama. But users can develop MockRequests of their own and use +// them along with or instead of the standard ones. +// +// When running tests with MockBroker it is strongly recommended to specify +// a timeout to `go test` so that if the broker hangs waiting for a response, +// the test panics. +// +// It is not necessary to prefix message length or correlation ID to your +// response bytes, the server does that automatically as a convenience. +type MockBroker struct { + brokerID int32 + port int32 + closing chan none + stopper chan none + expectations chan encoder + listener net.Listener + t TestReporter + latency time.Duration + handler requestHandlerFunc + notifier RequestNotifierFunc + history []RequestResponse + lock sync.Mutex +} + +// RequestResponse represents a Request/Response pair processed by MockBroker. +type RequestResponse struct { + Request protocolBody + Response encoder +} + +// SetLatency makes broker pause for the specified period every time before +// replying. +func (b *MockBroker) SetLatency(latency time.Duration) { + b.latency = latency +} + +// SetHandlerByMap defines mapping of Request types to MockResponses. 
When a +// request is received by the broker, it looks up the request type in the map +// and uses the found MockResponse instance to generate an appropriate reply. +// If the request type is not found in the map then nothing is sent. +func (b *MockBroker) SetHandlerByMap(handlerMap map[string]MockResponse) { + b.setHandler(func(req *request) (res encoder) { + reqTypeName := reflect.TypeOf(req.body).Elem().Name() + mockResponse := handlerMap[reqTypeName] + if mockResponse == nil { + return nil + } + return mockResponse.For(req.body) + }) +} + +// SetNotifier set a function that will get invoked whenever a request has been +// processed successfully and will provide the number of bytes read and written +func (b *MockBroker) SetNotifier(notifier RequestNotifierFunc) { + b.lock.Lock() + b.notifier = notifier + b.lock.Unlock() +} + +// BrokerID returns broker ID assigned to the broker. +func (b *MockBroker) BrokerID() int32 { + return b.brokerID +} + +// History returns a slice of RequestResponse pairs in the order they were +// processed by the broker. Note that in case of multiple connections to the +// broker the order expected by a test can be different from the order recorded +// in the history, unless some synchronization is implemented in the test. +func (b *MockBroker) History() []RequestResponse { + b.lock.Lock() + history := make([]RequestResponse, len(b.history)) + copy(history, b.history) + b.lock.Unlock() + return history +} + +// Port returns the TCP port number the broker is listening for requests on. +func (b *MockBroker) Port() int32 { + return b.port +} + +// Addr returns the broker connection string in the form "
:". +func (b *MockBroker) Addr() string { + return b.listener.Addr().String() +} + +// Close terminates the broker blocking until it stops internal goroutines and +// releases all resources. +func (b *MockBroker) Close() { + close(b.expectations) + if len(b.expectations) > 0 { + buf := bytes.NewBufferString(fmt.Sprintf("mockbroker/%d: not all expectations were satisfied! Still waiting on:\n", b.BrokerID())) + for e := range b.expectations { + _, _ = buf.WriteString(spew.Sdump(e)) + } + b.t.Error(buf.String()) + } + close(b.closing) + <-b.stopper +} + +// setHandler sets the specified function as the request handler. Whenever +// a mock broker reads a request from the wire it passes the request to the +// function and sends back whatever the handler function returns. +func (b *MockBroker) setHandler(handler requestHandlerFunc) { + b.lock.Lock() + b.handler = handler + b.lock.Unlock() +} + +func (b *MockBroker) serverLoop() { + defer close(b.stopper) + var err error + var conn net.Conn + + go func() { + <-b.closing + err := b.listener.Close() + if err != nil { + b.t.Error(err) + } + }() + + wg := &sync.WaitGroup{} + i := 0 + for conn, err = b.listener.Accept(); err == nil; conn, err = b.listener.Accept() { + wg.Add(1) + go b.handleRequests(conn, i, wg) + i++ + } + wg.Wait() + Logger.Printf("*** mockbroker/%d: listener closed, err=%v", b.BrokerID(), err) +} + +func (b *MockBroker) handleRequests(conn net.Conn, idx int, wg *sync.WaitGroup) { + defer wg.Done() + defer func() { + _ = conn.Close() + }() + Logger.Printf("*** mockbroker/%d/%d: connection opened", b.BrokerID(), idx) + var err error + + abort := make(chan none) + defer close(abort) + go func() { + select { + case <-b.closing: + _ = conn.Close() + case <-abort: + } + }() + + resHeader := make([]byte, 8) + for { + req, bytesRead, err := decodeRequest(conn) + if err != nil { + Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(req)) + b.serverError(err) + break 
+ } + + if b.latency > 0 { + time.Sleep(b.latency) + } + + b.lock.Lock() + res := b.handler(req) + b.history = append(b.history, RequestResponse{req.body, res}) + b.lock.Unlock() + + if res == nil { + Logger.Printf("*** mockbroker/%d/%d: ignored %v", b.brokerID, idx, spew.Sdump(req)) + continue + } + Logger.Printf("*** mockbroker/%d/%d: served %v -> %v", b.brokerID, idx, req, res) + + encodedRes, err := encode(res, nil) + if err != nil { + b.serverError(err) + break + } + if len(encodedRes) == 0 { + b.lock.Lock() + if b.notifier != nil { + b.notifier(bytesRead, 0) + } + b.lock.Unlock() + continue + } + + binary.BigEndian.PutUint32(resHeader, uint32(len(encodedRes)+4)) + binary.BigEndian.PutUint32(resHeader[4:], uint32(req.correlationID)) + if _, err = conn.Write(resHeader); err != nil { + b.serverError(err) + break + } + if _, err = conn.Write(encodedRes); err != nil { + b.serverError(err) + break + } + + b.lock.Lock() + if b.notifier != nil { + b.notifier(bytesRead, len(resHeader)+len(encodedRes)) + } + b.lock.Unlock() + } + Logger.Printf("*** mockbroker/%d/%d: connection closed, err=%v", b.BrokerID(), idx, err) +} + +func (b *MockBroker) defaultRequestHandler(req *request) (res encoder) { + select { + case res, ok := <-b.expectations: + if !ok { + return nil + } + return res + case <-time.After(expectationTimeout): + return nil + } +} + +func (b *MockBroker) serverError(err error) { + isConnectionClosedError := false + if _, ok := err.(*net.OpError); ok { + isConnectionClosedError = true + } else if err == io.EOF { + isConnectionClosedError = true + } else if err.Error() == "use of closed network connection" { + isConnectionClosedError = true + } + + if isConnectionClosedError { + return + } + + b.t.Errorf(err.Error()) +} + +// NewMockBroker launches a fake Kafka broker. It takes a TestReporter as provided by the +// test framework and a channel of responses to use. If an error occurs it is +// simply logged to the TestReporter and the broker exits. 
+func NewMockBroker(t TestReporter, brokerID int32) *MockBroker { + return NewMockBrokerAddr(t, brokerID, "localhost:0") +} + +// NewMockBrokerAddr behaves like newMockBroker but listens on the address you give +// it rather than just some ephemeral port. +func NewMockBrokerAddr(t TestReporter, brokerID int32, addr string) *MockBroker { + listener, err := net.Listen("tcp", addr) + if err != nil { + t.Fatal(err) + } + return NewMockBrokerListener(t, brokerID, listener) +} + +// NewMockBrokerListener behaves like newMockBrokerAddr but accepts connections on the listener specified. +func NewMockBrokerListener(t TestReporter, brokerID int32, listener net.Listener) *MockBroker { + var err error + + broker := &MockBroker{ + closing: make(chan none), + stopper: make(chan none), + t: t, + brokerID: brokerID, + expectations: make(chan encoder, 512), + listener: listener, + } + broker.handler = broker.defaultRequestHandler + + Logger.Printf("*** mockbroker/%d listening on %s\n", brokerID, broker.listener.Addr().String()) + _, portStr, err := net.SplitHostPort(broker.listener.Addr().String()) + if err != nil { + t.Fatal(err) + } + tmp, err := strconv.ParseInt(portStr, 10, 32) + if err != nil { + t.Fatal(err) + } + broker.port = int32(tmp) + + go broker.serverLoop() + + return broker +} + +func (b *MockBroker) Returns(e encoder) { + b.expectations <- e +} diff --git a/vendor/github.com/Shopify/sarama/mockresponses.go b/vendor/github.com/Shopify/sarama/mockresponses.go new file mode 100644 index 00000000000..1720441996f --- /dev/null +++ b/vendor/github.com/Shopify/sarama/mockresponses.go @@ -0,0 +1,727 @@ +package sarama + +import ( + "fmt" +) + +// TestReporter has methods matching go's testing.T to avoid importing +// `testing` in the main part of the library. 
+type TestReporter interface { + Error(...interface{}) + Errorf(string, ...interface{}) + Fatal(...interface{}) + Fatalf(string, ...interface{}) +} + +// MockResponse is a response builder interface it defines one method that +// allows generating a response based on a request body. MockResponses are used +// to program behavior of MockBroker in tests. +type MockResponse interface { + For(reqBody versionedDecoder) (res encoder) +} + +// MockWrapper is a mock response builder that returns a particular concrete +// response regardless of the actual request passed to the `For` method. +type MockWrapper struct { + res encoder +} + +func (mw *MockWrapper) For(reqBody versionedDecoder) (res encoder) { + return mw.res +} + +func NewMockWrapper(res encoder) *MockWrapper { + return &MockWrapper{res: res} +} + +// MockSequence is a mock response builder that is created from a sequence of +// concrete responses. Every time when a `MockBroker` calls its `For` method +// the next response from the sequence is returned. When the end of the +// sequence is reached the last element from the sequence is returned. +type MockSequence struct { + responses []MockResponse +} + +func NewMockSequence(responses ...interface{}) *MockSequence { + ms := &MockSequence{} + ms.responses = make([]MockResponse, len(responses)) + for i, res := range responses { + switch res := res.(type) { + case MockResponse: + ms.responses[i] = res + case encoder: + ms.responses[i] = NewMockWrapper(res) + default: + panic(fmt.Sprintf("Unexpected response type: %T", res)) + } + } + return ms +} + +func (mc *MockSequence) For(reqBody versionedDecoder) (res encoder) { + res = mc.responses[0].For(reqBody) + if len(mc.responses) > 1 { + mc.responses = mc.responses[1:] + } + return res +} + +// MockMetadataResponse is a `MetadataResponse` builder. 
+type MockMetadataResponse struct { + controllerID int32 + leaders map[string]map[int32]int32 + brokers map[string]int32 + t TestReporter +} + +func NewMockMetadataResponse(t TestReporter) *MockMetadataResponse { + return &MockMetadataResponse{ + leaders: make(map[string]map[int32]int32), + brokers: make(map[string]int32), + t: t, + } +} + +func (mmr *MockMetadataResponse) SetLeader(topic string, partition, brokerID int32) *MockMetadataResponse { + partitions := mmr.leaders[topic] + if partitions == nil { + partitions = make(map[int32]int32) + mmr.leaders[topic] = partitions + } + partitions[partition] = brokerID + return mmr +} + +func (mmr *MockMetadataResponse) SetBroker(addr string, brokerID int32) *MockMetadataResponse { + mmr.brokers[addr] = brokerID + return mmr +} + +func (mmr *MockMetadataResponse) SetController(brokerID int32) *MockMetadataResponse { + mmr.controllerID = brokerID + return mmr +} + +func (mmr *MockMetadataResponse) For(reqBody versionedDecoder) encoder { + metadataRequest := reqBody.(*MetadataRequest) + metadataResponse := &MetadataResponse{ + Version: metadataRequest.version(), + ControllerID: mmr.controllerID, + } + for addr, brokerID := range mmr.brokers { + metadataResponse.AddBroker(addr, brokerID) + } + if len(metadataRequest.Topics) == 0 { + for topic, partitions := range mmr.leaders { + for partition, brokerID := range partitions { + metadataResponse.AddTopicPartition(topic, partition, brokerID, nil, nil, ErrNoError) + } + } + return metadataResponse + } + for _, topic := range metadataRequest.Topics { + for partition, brokerID := range mmr.leaders[topic] { + metadataResponse.AddTopicPartition(topic, partition, brokerID, nil, nil, ErrNoError) + } + } + return metadataResponse +} + +// MockOffsetResponse is an `OffsetResponse` builder. 
+type MockOffsetResponse struct { + offsets map[string]map[int32]map[int64]int64 + t TestReporter + version int16 +} + +func NewMockOffsetResponse(t TestReporter) *MockOffsetResponse { + return &MockOffsetResponse{ + offsets: make(map[string]map[int32]map[int64]int64), + t: t, + } +} + +func (mor *MockOffsetResponse) SetVersion(version int16) *MockOffsetResponse { + mor.version = version + return mor +} + +func (mor *MockOffsetResponse) SetOffset(topic string, partition int32, time, offset int64) *MockOffsetResponse { + partitions := mor.offsets[topic] + if partitions == nil { + partitions = make(map[int32]map[int64]int64) + mor.offsets[topic] = partitions + } + times := partitions[partition] + if times == nil { + times = make(map[int64]int64) + partitions[partition] = times + } + times[time] = offset + return mor +} + +func (mor *MockOffsetResponse) For(reqBody versionedDecoder) encoder { + offsetRequest := reqBody.(*OffsetRequest) + offsetResponse := &OffsetResponse{Version: mor.version} + for topic, partitions := range offsetRequest.blocks { + for partition, block := range partitions { + offset := mor.getOffset(topic, partition, block.time) + offsetResponse.AddTopicPartition(topic, partition, offset) + } + } + return offsetResponse +} + +func (mor *MockOffsetResponse) getOffset(topic string, partition int32, time int64) int64 { + partitions := mor.offsets[topic] + if partitions == nil { + mor.t.Errorf("missing topic: %s", topic) + } + times := partitions[partition] + if times == nil { + mor.t.Errorf("missing partition: %d", partition) + } + offset, ok := times[time] + if !ok { + mor.t.Errorf("missing time: %d", time) + } + return offset +} + +// MockFetchResponse is a `FetchResponse` builder. 
+type MockFetchResponse struct { + messages map[string]map[int32]map[int64]Encoder + highWaterMarks map[string]map[int32]int64 + t TestReporter + batchSize int + version int16 +} + +func NewMockFetchResponse(t TestReporter, batchSize int) *MockFetchResponse { + return &MockFetchResponse{ + messages: make(map[string]map[int32]map[int64]Encoder), + highWaterMarks: make(map[string]map[int32]int64), + t: t, + batchSize: batchSize, + } +} + +func (mfr *MockFetchResponse) SetVersion(version int16) *MockFetchResponse { + mfr.version = version + return mfr +} + +func (mfr *MockFetchResponse) SetMessage(topic string, partition int32, offset int64, msg Encoder) *MockFetchResponse { + partitions := mfr.messages[topic] + if partitions == nil { + partitions = make(map[int32]map[int64]Encoder) + mfr.messages[topic] = partitions + } + messages := partitions[partition] + if messages == nil { + messages = make(map[int64]Encoder) + partitions[partition] = messages + } + messages[offset] = msg + return mfr +} + +func (mfr *MockFetchResponse) SetHighWaterMark(topic string, partition int32, offset int64) *MockFetchResponse { + partitions := mfr.highWaterMarks[topic] + if partitions == nil { + partitions = make(map[int32]int64) + mfr.highWaterMarks[topic] = partitions + } + partitions[partition] = offset + return mfr +} + +func (mfr *MockFetchResponse) For(reqBody versionedDecoder) encoder { + fetchRequest := reqBody.(*FetchRequest) + res := &FetchResponse{ + Version: mfr.version, + } + for topic, partitions := range fetchRequest.blocks { + for partition, block := range partitions { + initialOffset := block.fetchOffset + offset := initialOffset + maxOffset := initialOffset + int64(mfr.getMessageCount(topic, partition)) + for i := 0; i < mfr.batchSize && offset < maxOffset; { + msg := mfr.getMessage(topic, partition, offset) + if msg != nil { + res.AddMessage(topic, partition, nil, msg, offset) + i++ + } + offset++ + } + fb := res.GetBlock(topic, partition) + if fb == nil { + 
res.AddError(topic, partition, ErrNoError) + fb = res.GetBlock(topic, partition) + } + fb.HighWaterMarkOffset = mfr.getHighWaterMark(topic, partition) + } + } + return res +} + +func (mfr *MockFetchResponse) getMessage(topic string, partition int32, offset int64) Encoder { + partitions := mfr.messages[topic] + if partitions == nil { + return nil + } + messages := partitions[partition] + if messages == nil { + return nil + } + return messages[offset] +} + +func (mfr *MockFetchResponse) getMessageCount(topic string, partition int32) int { + partitions := mfr.messages[topic] + if partitions == nil { + return 0 + } + messages := partitions[partition] + if messages == nil { + return 0 + } + return len(messages) +} + +func (mfr *MockFetchResponse) getHighWaterMark(topic string, partition int32) int64 { + partitions := mfr.highWaterMarks[topic] + if partitions == nil { + return 0 + } + return partitions[partition] +} + +// MockConsumerMetadataResponse is a `ConsumerMetadataResponse` builder. +type MockConsumerMetadataResponse struct { + coordinators map[string]interface{} + t TestReporter +} + +func NewMockConsumerMetadataResponse(t TestReporter) *MockConsumerMetadataResponse { + return &MockConsumerMetadataResponse{ + coordinators: make(map[string]interface{}), + t: t, + } +} + +func (mr *MockConsumerMetadataResponse) SetCoordinator(group string, broker *MockBroker) *MockConsumerMetadataResponse { + mr.coordinators[group] = broker + return mr +} + +func (mr *MockConsumerMetadataResponse) SetError(group string, kerror KError) *MockConsumerMetadataResponse { + mr.coordinators[group] = kerror + return mr +} + +func (mr *MockConsumerMetadataResponse) For(reqBody versionedDecoder) encoder { + req := reqBody.(*ConsumerMetadataRequest) + group := req.ConsumerGroup + res := &ConsumerMetadataResponse{} + v := mr.coordinators[group] + switch v := v.(type) { + case *MockBroker: + res.Coordinator = &Broker{id: v.BrokerID(), addr: v.Addr()} + case KError: + res.Err = v + } + return 
res +} + +// MockFindCoordinatorResponse is a `FindCoordinatorResponse` builder. +type MockFindCoordinatorResponse struct { + groupCoordinators map[string]interface{} + transCoordinators map[string]interface{} + t TestReporter +} + +func NewMockFindCoordinatorResponse(t TestReporter) *MockFindCoordinatorResponse { + return &MockFindCoordinatorResponse{ + groupCoordinators: make(map[string]interface{}), + transCoordinators: make(map[string]interface{}), + t: t, + } +} + +func (mr *MockFindCoordinatorResponse) SetCoordinator(coordinatorType CoordinatorType, group string, broker *MockBroker) *MockFindCoordinatorResponse { + switch coordinatorType { + case CoordinatorGroup: + mr.groupCoordinators[group] = broker + case CoordinatorTransaction: + mr.transCoordinators[group] = broker + } + return mr +} + +func (mr *MockFindCoordinatorResponse) SetError(coordinatorType CoordinatorType, group string, kerror KError) *MockFindCoordinatorResponse { + switch coordinatorType { + case CoordinatorGroup: + mr.groupCoordinators[group] = kerror + case CoordinatorTransaction: + mr.transCoordinators[group] = kerror + } + return mr +} + +func (mr *MockFindCoordinatorResponse) For(reqBody versionedDecoder) encoder { + req := reqBody.(*FindCoordinatorRequest) + res := &FindCoordinatorResponse{} + var v interface{} + switch req.CoordinatorType { + case CoordinatorGroup: + v = mr.groupCoordinators[req.CoordinatorKey] + case CoordinatorTransaction: + v = mr.transCoordinators[req.CoordinatorKey] + } + switch v := v.(type) { + case *MockBroker: + res.Coordinator = &Broker{id: v.BrokerID(), addr: v.Addr()} + case KError: + res.Err = v + } + return res +} + +// MockOffsetCommitResponse is a `OffsetCommitResponse` builder. 
+type MockOffsetCommitResponse struct { + errors map[string]map[string]map[int32]KError + t TestReporter +} + +func NewMockOffsetCommitResponse(t TestReporter) *MockOffsetCommitResponse { + return &MockOffsetCommitResponse{t: t} +} + +func (mr *MockOffsetCommitResponse) SetError(group, topic string, partition int32, kerror KError) *MockOffsetCommitResponse { + if mr.errors == nil { + mr.errors = make(map[string]map[string]map[int32]KError) + } + topics := mr.errors[group] + if topics == nil { + topics = make(map[string]map[int32]KError) + mr.errors[group] = topics + } + partitions := topics[topic] + if partitions == nil { + partitions = make(map[int32]KError) + topics[topic] = partitions + } + partitions[partition] = kerror + return mr +} + +func (mr *MockOffsetCommitResponse) For(reqBody versionedDecoder) encoder { + req := reqBody.(*OffsetCommitRequest) + group := req.ConsumerGroup + res := &OffsetCommitResponse{} + for topic, partitions := range req.blocks { + for partition := range partitions { + res.AddError(topic, partition, mr.getError(group, topic, partition)) + } + } + return res +} + +func (mr *MockOffsetCommitResponse) getError(group, topic string, partition int32) KError { + topics := mr.errors[group] + if topics == nil { + return ErrNoError + } + partitions := topics[topic] + if partitions == nil { + return ErrNoError + } + kerror, ok := partitions[partition] + if !ok { + return ErrNoError + } + return kerror +} + +// MockProduceResponse is a `ProduceResponse` builder. 
+type MockProduceResponse struct { + version int16 + errors map[string]map[int32]KError + t TestReporter +} + +func NewMockProduceResponse(t TestReporter) *MockProduceResponse { + return &MockProduceResponse{t: t} +} + +func (mr *MockProduceResponse) SetVersion(version int16) *MockProduceResponse { + mr.version = version + return mr +} + +func (mr *MockProduceResponse) SetError(topic string, partition int32, kerror KError) *MockProduceResponse { + if mr.errors == nil { + mr.errors = make(map[string]map[int32]KError) + } + partitions := mr.errors[topic] + if partitions == nil { + partitions = make(map[int32]KError) + mr.errors[topic] = partitions + } + partitions[partition] = kerror + return mr +} + +func (mr *MockProduceResponse) For(reqBody versionedDecoder) encoder { + req := reqBody.(*ProduceRequest) + res := &ProduceResponse{ + Version: mr.version, + } + for topic, partitions := range req.records { + for partition := range partitions { + res.AddTopicPartition(topic, partition, mr.getError(topic, partition)) + } + } + return res +} + +func (mr *MockProduceResponse) getError(topic string, partition int32) KError { + partitions := mr.errors[topic] + if partitions == nil { + return ErrNoError + } + kerror, ok := partitions[partition] + if !ok { + return ErrNoError + } + return kerror +} + +// MockOffsetFetchResponse is a `OffsetFetchResponse` builder. 
+type MockOffsetFetchResponse struct { + offsets map[string]map[string]map[int32]*OffsetFetchResponseBlock + t TestReporter +} + +func NewMockOffsetFetchResponse(t TestReporter) *MockOffsetFetchResponse { + return &MockOffsetFetchResponse{t: t} +} + +func (mr *MockOffsetFetchResponse) SetOffset(group, topic string, partition int32, offset int64, metadata string, kerror KError) *MockOffsetFetchResponse { + if mr.offsets == nil { + mr.offsets = make(map[string]map[string]map[int32]*OffsetFetchResponseBlock) + } + topics := mr.offsets[group] + if topics == nil { + topics = make(map[string]map[int32]*OffsetFetchResponseBlock) + mr.offsets[group] = topics + } + partitions := topics[topic] + if partitions == nil { + partitions = make(map[int32]*OffsetFetchResponseBlock) + topics[topic] = partitions + } + partitions[partition] = &OffsetFetchResponseBlock{offset, metadata, kerror} + return mr +} + +func (mr *MockOffsetFetchResponse) For(reqBody versionedDecoder) encoder { + req := reqBody.(*OffsetFetchRequest) + group := req.ConsumerGroup + res := &OffsetFetchResponse{} + for topic, partitions := range mr.offsets[group] { + for partition, block := range partitions { + res.AddBlock(topic, partition, block) + } + } + return res +} + +type MockCreateTopicsResponse struct { + t TestReporter +} + +func NewMockCreateTopicsResponse(t TestReporter) *MockCreateTopicsResponse { + return &MockCreateTopicsResponse{t: t} +} + +func (mr *MockCreateTopicsResponse) For(reqBody versionedDecoder) encoder { + req := reqBody.(*CreateTopicsRequest) + res := &CreateTopicsResponse{} + res.TopicErrors = make(map[string]*TopicError) + + for topic, _ := range req.TopicDetails { + res.TopicErrors[topic] = &TopicError{Err: ErrNoError} + } + return res +} + +type MockDeleteTopicsResponse struct { + t TestReporter +} + +func NewMockDeleteTopicsResponse(t TestReporter) *MockDeleteTopicsResponse { + return &MockDeleteTopicsResponse{t: t} +} + +func (mr *MockDeleteTopicsResponse) For(reqBody 
versionedDecoder) encoder { + req := reqBody.(*DeleteTopicsRequest) + res := &DeleteTopicsResponse{} + res.TopicErrorCodes = make(map[string]KError) + + for _, topic := range req.Topics { + res.TopicErrorCodes[topic] = ErrNoError + } + return res +} + +type MockCreatePartitionsResponse struct { + t TestReporter +} + +func NewMockCreatePartitionsResponse(t TestReporter) *MockCreatePartitionsResponse { + return &MockCreatePartitionsResponse{t: t} +} + +func (mr *MockCreatePartitionsResponse) For(reqBody versionedDecoder) encoder { + req := reqBody.(*CreatePartitionsRequest) + res := &CreatePartitionsResponse{} + res.TopicPartitionErrors = make(map[string]*TopicPartitionError) + + for topic, _ := range req.TopicPartitions { + res.TopicPartitionErrors[topic] = &TopicPartitionError{Err: ErrNoError} + } + return res +} + +type MockDeleteRecordsResponse struct { + t TestReporter +} + +func NewMockDeleteRecordsResponse(t TestReporter) *MockDeleteRecordsResponse { + return &MockDeleteRecordsResponse{t: t} +} + +func (mr *MockDeleteRecordsResponse) For(reqBody versionedDecoder) encoder { + req := reqBody.(*DeleteRecordsRequest) + res := &DeleteRecordsResponse{} + res.Topics = make(map[string]*DeleteRecordsResponseTopic) + + for topic, deleteRecordRequestTopic := range req.Topics { + partitions := make(map[int32]*DeleteRecordsResponsePartition) + for partition, _ := range deleteRecordRequestTopic.PartitionOffsets { + partitions[partition] = &DeleteRecordsResponsePartition{Err: ErrNoError} + } + res.Topics[topic] = &DeleteRecordsResponseTopic{Partitions: partitions} + } + return res +} + +type MockDescribeConfigsResponse struct { + t TestReporter +} + +func NewMockDescribeConfigsResponse(t TestReporter) *MockDescribeConfigsResponse { + return &MockDescribeConfigsResponse{t: t} +} + +func (mr *MockDescribeConfigsResponse) For(reqBody versionedDecoder) encoder { + req := reqBody.(*DescribeConfigsRequest) + res := &DescribeConfigsResponse{} + + var configEntries []*ConfigEntry + 
configEntries = append(configEntries, &ConfigEntry{Name: "my_topic", + Value: "my_topic", + ReadOnly: true, + Default: true, + Sensitive: false, + }) + + for _, r := range req.Resources { + res.Resources = append(res.Resources, &ResourceResponse{Name: r.Name, Configs: configEntries}) + } + return res +} + +type MockAlterConfigsResponse struct { + t TestReporter +} + +func NewMockAlterConfigsResponse(t TestReporter) *MockAlterConfigsResponse { + return &MockAlterConfigsResponse{t: t} +} + +func (mr *MockAlterConfigsResponse) For(reqBody versionedDecoder) encoder { + req := reqBody.(*AlterConfigsRequest) + res := &AlterConfigsResponse{} + + for _, r := range req.Resources { + res.Resources = append(res.Resources, &AlterConfigsResourceResponse{Name: r.Name, + Type: TopicResource, + ErrorMsg: "", + }) + } + return res +} + +type MockCreateAclsResponse struct { + t TestReporter +} + +func NewMockCreateAclsResponse(t TestReporter) *MockCreateAclsResponse { + return &MockCreateAclsResponse{t: t} +} + +func (mr *MockCreateAclsResponse) For(reqBody versionedDecoder) encoder { + req := reqBody.(*CreateAclsRequest) + res := &CreateAclsResponse{} + + for range req.AclCreations { + res.AclCreationResponses = append(res.AclCreationResponses, &AclCreationResponse{Err: ErrNoError}) + } + return res +} + +type MockListAclsResponse struct { + t TestReporter +} + +func NewMockListAclsResponse(t TestReporter) *MockListAclsResponse { + return &MockListAclsResponse{t: t} +} + +func (mr *MockListAclsResponse) For(reqBody versionedDecoder) encoder { + req := reqBody.(*DescribeAclsRequest) + res := &DescribeAclsResponse{} + + res.Err = ErrNoError + acl := &ResourceAcls{} + acl.Resource.ResourceName = *req.ResourceName + acl.Resource.ResourceType = req.ResourceType + acl.Acls = append(acl.Acls, &Acl{}) + res.ResourceAcls = append(res.ResourceAcls, acl) + + return res +} + +type MockDeleteAclsResponse struct { + t TestReporter +} + +func NewMockDeleteAclsResponse(t TestReporter) 
*MockDeleteAclsResponse { + return &MockDeleteAclsResponse{t: t} +} + +func (mr *MockDeleteAclsResponse) For(reqBody versionedDecoder) encoder { + req := reqBody.(*DeleteAclsRequest) + res := &DeleteAclsResponse{} + + for range req.Filters { + response := &FilterResponse{Err: ErrNoError} + response.MatchingAcls = append(response.MatchingAcls, &MatchingAcl{Err: ErrNoError}) + res.FilterResponses = append(res.FilterResponses, response) + } + return res +} diff --git a/vendor/github.com/Shopify/sarama/offset_commit_request.go b/vendor/github.com/Shopify/sarama/offset_commit_request.go new file mode 100644 index 00000000000..37e99fbf5b8 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/offset_commit_request.go @@ -0,0 +1,204 @@ +package sarama + +import "errors" + +// ReceiveTime is a special value for the timestamp field of Offset Commit Requests which +// tells the broker to set the timestamp to the time at which the request was received. +// The timestamp is only used if message version 1 is used, which requires kafka 0.8.2. +const ReceiveTime int64 = -1 + +// GroupGenerationUndefined is a special value for the group generation field of +// Offset Commit Requests that should be used when a consumer group does not rely +// on Kafka for partition management. 
+const GroupGenerationUndefined = -1 + +type offsetCommitRequestBlock struct { + offset int64 + timestamp int64 + metadata string +} + +func (b *offsetCommitRequestBlock) encode(pe packetEncoder, version int16) error { + pe.putInt64(b.offset) + if version == 1 { + pe.putInt64(b.timestamp) + } else if b.timestamp != 0 { + Logger.Println("Non-zero timestamp specified for OffsetCommitRequest not v1, it will be ignored") + } + + return pe.putString(b.metadata) +} + +func (b *offsetCommitRequestBlock) decode(pd packetDecoder, version int16) (err error) { + if b.offset, err = pd.getInt64(); err != nil { + return err + } + if version == 1 { + if b.timestamp, err = pd.getInt64(); err != nil { + return err + } + } + b.metadata, err = pd.getString() + return err +} + +type OffsetCommitRequest struct { + ConsumerGroup string + ConsumerGroupGeneration int32 // v1 or later + ConsumerID string // v1 or later + RetentionTime int64 // v2 or later + + // Version can be: + // - 0 (kafka 0.8.1 and later) + // - 1 (kafka 0.8.2 and later) + // - 2 (kafka 0.9.0 and later) + Version int16 + blocks map[string]map[int32]*offsetCommitRequestBlock +} + +func (r *OffsetCommitRequest) encode(pe packetEncoder) error { + if r.Version < 0 || r.Version > 2 { + return PacketEncodingError{"invalid or unsupported OffsetCommitRequest version field"} + } + + if err := pe.putString(r.ConsumerGroup); err != nil { + return err + } + + if r.Version >= 1 { + pe.putInt32(r.ConsumerGroupGeneration) + if err := pe.putString(r.ConsumerID); err != nil { + return err + } + } else { + if r.ConsumerGroupGeneration != 0 { + Logger.Println("Non-zero ConsumerGroupGeneration specified for OffsetCommitRequest v0, it will be ignored") + } + if r.ConsumerID != "" { + Logger.Println("Non-empty ConsumerID specified for OffsetCommitRequest v0, it will be ignored") + } + } + + if r.Version >= 2 { + pe.putInt64(r.RetentionTime) + } else if r.RetentionTime != 0 { + Logger.Println("Non-zero RetentionTime specified for 
OffsetCommitRequest version <2, it will be ignored") + } + + if err := pe.putArrayLength(len(r.blocks)); err != nil { + return err + } + for topic, partitions := range r.blocks { + if err := pe.putString(topic); err != nil { + return err + } + if err := pe.putArrayLength(len(partitions)); err != nil { + return err + } + for partition, block := range partitions { + pe.putInt32(partition) + if err := block.encode(pe, r.Version); err != nil { + return err + } + } + } + return nil +} + +func (r *OffsetCommitRequest) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + + if r.ConsumerGroup, err = pd.getString(); err != nil { + return err + } + + if r.Version >= 1 { + if r.ConsumerGroupGeneration, err = pd.getInt32(); err != nil { + return err + } + if r.ConsumerID, err = pd.getString(); err != nil { + return err + } + } + + if r.Version >= 2 { + if r.RetentionTime, err = pd.getInt64(); err != nil { + return err + } + } + + topicCount, err := pd.getArrayLength() + if err != nil { + return err + } + if topicCount == 0 { + return nil + } + r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock) + for i := 0; i < topicCount; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + partitionCount, err := pd.getArrayLength() + if err != nil { + return err + } + r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock) + for j := 0; j < partitionCount; j++ { + partition, err := pd.getInt32() + if err != nil { + return err + } + block := &offsetCommitRequestBlock{} + if err := block.decode(pd, r.Version); err != nil { + return err + } + r.blocks[topic][partition] = block + } + } + return nil +} + +func (r *OffsetCommitRequest) key() int16 { + return 8 +} + +func (r *OffsetCommitRequest) version() int16 { + return r.Version +} + +func (r *OffsetCommitRequest) requiredVersion() KafkaVersion { + switch r.Version { + case 1: + return V0_8_2_0 + case 2: + return V0_9_0_0 + default: + return MinVersion + } +} + +func (r 
*OffsetCommitRequest) AddBlock(topic string, partitionID int32, offset int64, timestamp int64, metadata string) { + if r.blocks == nil { + r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock) + } + + if r.blocks[topic] == nil { + r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock) + } + + r.blocks[topic][partitionID] = &offsetCommitRequestBlock{offset, timestamp, metadata} +} + +func (r *OffsetCommitRequest) Offset(topic string, partitionID int32) (int64, string, error) { + partitions := r.blocks[topic] + if partitions == nil { + return 0, "", errors.New("No such offset") + } + block := partitions[partitionID] + if block == nil { + return 0, "", errors.New("No such offset") + } + return block.offset, block.metadata, nil +} diff --git a/vendor/github.com/Shopify/sarama/offset_commit_response.go b/vendor/github.com/Shopify/sarama/offset_commit_response.go new file mode 100644 index 00000000000..a4b18acdff2 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/offset_commit_response.go @@ -0,0 +1,85 @@ +package sarama + +type OffsetCommitResponse struct { + Errors map[string]map[int32]KError +} + +func (r *OffsetCommitResponse) AddError(topic string, partition int32, kerror KError) { + if r.Errors == nil { + r.Errors = make(map[string]map[int32]KError) + } + partitions := r.Errors[topic] + if partitions == nil { + partitions = make(map[int32]KError) + r.Errors[topic] = partitions + } + partitions[partition] = kerror +} + +func (r *OffsetCommitResponse) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(r.Errors)); err != nil { + return err + } + for topic, partitions := range r.Errors { + if err := pe.putString(topic); err != nil { + return err + } + if err := pe.putArrayLength(len(partitions)); err != nil { + return err + } + for partition, kerror := range partitions { + pe.putInt32(partition) + pe.putInt16(int16(kerror)) + } + } + return nil +} + +func (r *OffsetCommitResponse) decode(pd packetDecoder, version int16) (err error) { + 
numTopics, err := pd.getArrayLength() + if err != nil || numTopics == 0 { + return err + } + + r.Errors = make(map[string]map[int32]KError, numTopics) + for i := 0; i < numTopics; i++ { + name, err := pd.getString() + if err != nil { + return err + } + + numErrors, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Errors[name] = make(map[int32]KError, numErrors) + + for j := 0; j < numErrors; j++ { + id, err := pd.getInt32() + if err != nil { + return err + } + + tmp, err := pd.getInt16() + if err != nil { + return err + } + r.Errors[name][id] = KError(tmp) + } + } + + return nil +} + +func (r *OffsetCommitResponse) key() int16 { + return 8 +} + +func (r *OffsetCommitResponse) version() int16 { + return 0 +} + +func (r *OffsetCommitResponse) requiredVersion() KafkaVersion { + return MinVersion +} diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_request.go b/vendor/github.com/Shopify/sarama/offset_fetch_request.go new file mode 100644 index 00000000000..5a05014b481 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/offset_fetch_request.go @@ -0,0 +1,81 @@ +package sarama + +type OffsetFetchRequest struct { + ConsumerGroup string + Version int16 + partitions map[string][]int32 +} + +func (r *OffsetFetchRequest) encode(pe packetEncoder) (err error) { + if r.Version < 0 || r.Version > 1 { + return PacketEncodingError{"invalid or unsupported OffsetFetchRequest version field"} + } + + if err = pe.putString(r.ConsumerGroup); err != nil { + return err + } + if err = pe.putArrayLength(len(r.partitions)); err != nil { + return err + } + for topic, partitions := range r.partitions { + if err = pe.putString(topic); err != nil { + return err + } + if err = pe.putInt32Array(partitions); err != nil { + return err + } + } + return nil +} + +func (r *OffsetFetchRequest) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + if r.ConsumerGroup, err = pd.getString(); err != nil { + return err + } + partitionCount, err := 
pd.getArrayLength() + if err != nil { + return err + } + if partitionCount == 0 { + return nil + } + r.partitions = make(map[string][]int32) + for i := 0; i < partitionCount; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + partitions, err := pd.getInt32Array() + if err != nil { + return err + } + r.partitions[topic] = partitions + } + return nil +} + +func (r *OffsetFetchRequest) key() int16 { + return 9 +} + +func (r *OffsetFetchRequest) version() int16 { + return r.Version +} + +func (r *OffsetFetchRequest) requiredVersion() KafkaVersion { + switch r.Version { + case 1: + return V0_8_2_0 + default: + return MinVersion + } +} + +func (r *OffsetFetchRequest) AddPartition(topic string, partitionID int32) { + if r.partitions == nil { + r.partitions = make(map[string][]int32) + } + + r.partitions[topic] = append(r.partitions[topic], partitionID) +} diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_response.go b/vendor/github.com/Shopify/sarama/offset_fetch_response.go new file mode 100644 index 00000000000..11e4b1f3fdf --- /dev/null +++ b/vendor/github.com/Shopify/sarama/offset_fetch_response.go @@ -0,0 +1,143 @@ +package sarama + +type OffsetFetchResponseBlock struct { + Offset int64 + Metadata string + Err KError +} + +func (b *OffsetFetchResponseBlock) decode(pd packetDecoder) (err error) { + b.Offset, err = pd.getInt64() + if err != nil { + return err + } + + b.Metadata, err = pd.getString() + if err != nil { + return err + } + + tmp, err := pd.getInt16() + if err != nil { + return err + } + b.Err = KError(tmp) + + return nil +} + +func (b *OffsetFetchResponseBlock) encode(pe packetEncoder) (err error) { + pe.putInt64(b.Offset) + + err = pe.putString(b.Metadata) + if err != nil { + return err + } + + pe.putInt16(int16(b.Err)) + + return nil +} + +type OffsetFetchResponse struct { + Blocks map[string]map[int32]*OffsetFetchResponseBlock +} + +func (r *OffsetFetchResponse) encode(pe packetEncoder) error { + if err := 
pe.putArrayLength(len(r.Blocks)); err != nil { + return err + } + for topic, partitions := range r.Blocks { + if err := pe.putString(topic); err != nil { + return err + } + if err := pe.putArrayLength(len(partitions)); err != nil { + return err + } + for partition, block := range partitions { + pe.putInt32(partition) + if err := block.encode(pe); err != nil { + return err + } + } + } + return nil +} + +func (r *OffsetFetchResponse) decode(pd packetDecoder, version int16) (err error) { + numTopics, err := pd.getArrayLength() + if err != nil || numTopics == 0 { + return err + } + + r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock, numTopics) + for i := 0; i < numTopics; i++ { + name, err := pd.getString() + if err != nil { + return err + } + + numBlocks, err := pd.getArrayLength() + if err != nil { + return err + } + + if numBlocks == 0 { + r.Blocks[name] = nil + continue + } + r.Blocks[name] = make(map[int32]*OffsetFetchResponseBlock, numBlocks) + + for j := 0; j < numBlocks; j++ { + id, err := pd.getInt32() + if err != nil { + return err + } + + block := new(OffsetFetchResponseBlock) + err = block.decode(pd) + if err != nil { + return err + } + r.Blocks[name][id] = block + } + } + + return nil +} + +func (r *OffsetFetchResponse) key() int16 { + return 9 +} + +func (r *OffsetFetchResponse) version() int16 { + return 0 +} + +func (r *OffsetFetchResponse) requiredVersion() KafkaVersion { + return MinVersion +} + +func (r *OffsetFetchResponse) GetBlock(topic string, partition int32) *OffsetFetchResponseBlock { + if r.Blocks == nil { + return nil + } + + if r.Blocks[topic] == nil { + return nil + } + + return r.Blocks[topic][partition] +} + +func (r *OffsetFetchResponse) AddBlock(topic string, partition int32, block *OffsetFetchResponseBlock) { + if r.Blocks == nil { + r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock) + } + partitions := r.Blocks[topic] + if partitions == nil { + partitions = make(map[int32]*OffsetFetchResponseBlock) + 
r.Blocks[topic] = partitions + } + partitions[partition] = block +} diff --git a/vendor/github.com/Shopify/sarama/offset_request.go b/vendor/github.com/Shopify/sarama/offset_request.go new file mode 100644 index 00000000000..4c5df75df05 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/offset_request.go @@ -0,0 +1,132 @@ +package sarama + +type offsetRequestBlock struct { + time int64 + maxOffsets int32 // Only used in version 0 +} + +func (b *offsetRequestBlock) encode(pe packetEncoder, version int16) error { + pe.putInt64(int64(b.time)) + if version == 0 { + pe.putInt32(b.maxOffsets) + } + + return nil +} + +func (b *offsetRequestBlock) decode(pd packetDecoder, version int16) (err error) { + if b.time, err = pd.getInt64(); err != nil { + return err + } + if version == 0 { + if b.maxOffsets, err = pd.getInt32(); err != nil { + return err + } + } + return nil +} + +type OffsetRequest struct { + Version int16 + blocks map[string]map[int32]*offsetRequestBlock +} + +func (r *OffsetRequest) encode(pe packetEncoder) error { + pe.putInt32(-1) // replica ID is always -1 for clients + err := pe.putArrayLength(len(r.blocks)) + if err != nil { + return err + } + for topic, partitions := range r.blocks { + err = pe.putString(topic) + if err != nil { + return err + } + err = pe.putArrayLength(len(partitions)) + if err != nil { + return err + } + for partition, block := range partitions { + pe.putInt32(partition) + if err = block.encode(pe, r.Version); err != nil { + return err + } + } + } + return nil +} + +func (r *OffsetRequest) decode(pd packetDecoder, version int16) error { + r.Version = version + + // Ignore replica ID + if _, err := pd.getInt32(); err != nil { + return err + } + blockCount, err := pd.getArrayLength() + if err != nil { + return err + } + if blockCount == 0 { + return nil + } + r.blocks = make(map[string]map[int32]*offsetRequestBlock) + for i := 0; i < blockCount; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + partitionCount, 
err := pd.getArrayLength() + if err != nil { + return err + } + r.blocks[topic] = make(map[int32]*offsetRequestBlock) + for j := 0; j < partitionCount; j++ { + partition, err := pd.getInt32() + if err != nil { + return err + } + block := &offsetRequestBlock{} + if err := block.decode(pd, version); err != nil { + return err + } + r.blocks[topic][partition] = block + } + } + return nil +} + +func (r *OffsetRequest) key() int16 { + return 2 +} + +func (r *OffsetRequest) version() int16 { + return r.Version +} + +func (r *OffsetRequest) requiredVersion() KafkaVersion { + switch r.Version { + case 1: + return V0_10_1_0 + default: + return MinVersion + } +} + +func (r *OffsetRequest) AddBlock(topic string, partitionID int32, time int64, maxOffsets int32) { + if r.blocks == nil { + r.blocks = make(map[string]map[int32]*offsetRequestBlock) + } + + if r.blocks[topic] == nil { + r.blocks[topic] = make(map[int32]*offsetRequestBlock) + } + + tmp := new(offsetRequestBlock) + tmp.time = time + if r.Version == 0 { + tmp.maxOffsets = maxOffsets + } + + r.blocks[topic][partitionID] = tmp +} diff --git a/vendor/github.com/Shopify/sarama/offset_response.go b/vendor/github.com/Shopify/sarama/offset_response.go new file mode 100644 index 00000000000..8b2193f9a0b --- /dev/null +++ b/vendor/github.com/Shopify/sarama/offset_response.go @@ -0,0 +1,174 @@ +package sarama + +type OffsetResponseBlock struct { + Err KError + Offsets []int64 // Version 0 + Offset int64 // Version 1 + Timestamp int64 // Version 1 +} + +func (b *OffsetResponseBlock) decode(pd packetDecoder, version int16) (err error) { + tmp, err := pd.getInt16() + if err != nil { + return err + } + b.Err = KError(tmp) + + if version == 0 { + b.Offsets, err = pd.getInt64Array() + + return err + } + + b.Timestamp, err = pd.getInt64() + if err != nil { + return err + } + + b.Offset, err = pd.getInt64() + if err != nil { + return err + } + + // For backwards compatibility put the offset in the offsets array too + b.Offsets = 
[]int64{b.Offset} + + return nil +} + +func (b *OffsetResponseBlock) encode(pe packetEncoder, version int16) (err error) { + pe.putInt16(int16(b.Err)) + + if version == 0 { + return pe.putInt64Array(b.Offsets) + } + + pe.putInt64(b.Timestamp) + pe.putInt64(b.Offset) + + return nil +} + +type OffsetResponse struct { + Version int16 + Blocks map[string]map[int32]*OffsetResponseBlock +} + +func (r *OffsetResponse) decode(pd packetDecoder, version int16) (err error) { + numTopics, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Blocks = make(map[string]map[int32]*OffsetResponseBlock, numTopics) + for i := 0; i < numTopics; i++ { + name, err := pd.getString() + if err != nil { + return err + } + + numBlocks, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Blocks[name] = make(map[int32]*OffsetResponseBlock, numBlocks) + + for j := 0; j < numBlocks; j++ { + id, err := pd.getInt32() + if err != nil { + return err + } + + block := new(OffsetResponseBlock) + err = block.decode(pd, version) + if err != nil { + return err + } + r.Blocks[name][id] = block + } + } + + return nil +} + +func (r *OffsetResponse) GetBlock(topic string, partition int32) *OffsetResponseBlock { + if r.Blocks == nil { + return nil + } + + if r.Blocks[topic] == nil { + return nil + } + + return r.Blocks[topic][partition] +} + +/* +// [0 0 0 1 ntopics +0 8 109 121 95 116 111 112 105 99 topic +0 0 0 1 npartitions +0 0 0 0 id +0 0 + +0 0 0 1 0 0 0 0 +0 1 1 1 0 0 0 1 +0 8 109 121 95 116 111 112 +105 99 0 0 0 1 0 0 +0 0 0 0 0 0 0 1 +0 0 0 0 0 1 1 1] + +*/ +func (r *OffsetResponse) encode(pe packetEncoder) (err error) { + if err = pe.putArrayLength(len(r.Blocks)); err != nil { + return err + } + + for topic, partitions := range r.Blocks { + if err = pe.putString(topic); err != nil { + return err + } + if err = pe.putArrayLength(len(partitions)); err != nil { + return err + } + for partition, block := range partitions { + pe.putInt32(partition) + if err = block.encode(pe, 
r.version()); err != nil { + return err + } + } + } + + return nil +} + +func (r *OffsetResponse) key() int16 { + return 2 +} + +func (r *OffsetResponse) version() int16 { + return r.Version +} + +func (r *OffsetResponse) requiredVersion() KafkaVersion { + switch r.Version { + case 1: + return V0_10_1_0 + default: + return MinVersion + } +} + +// testing API + +func (r *OffsetResponse) AddTopicPartition(topic string, partition int32, offset int64) { + if r.Blocks == nil { + r.Blocks = make(map[string]map[int32]*OffsetResponseBlock) + } + byTopic, ok := r.Blocks[topic] + if !ok { + byTopic = make(map[int32]*OffsetResponseBlock) + r.Blocks[topic] = byTopic + } + byTopic[partition] = &OffsetResponseBlock{Offsets: []int64{offset}, Offset: offset} +} diff --git a/vendor/github.com/Shopify/sarama/packet_decoder.go b/vendor/github.com/Shopify/sarama/packet_decoder.go new file mode 100644 index 00000000000..74805ccbf53 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/packet_decoder.go @@ -0,0 +1,60 @@ +package sarama + +// PacketDecoder is the interface providing helpers for reading with Kafka's encoding rules. +// Types implementing Decoder only need to worry about calling methods like GetString, +// not about how a string is represented in Kafka. 
+type packetDecoder interface { + // Primitives + getInt8() (int8, error) + getInt16() (int16, error) + getInt32() (int32, error) + getInt64() (int64, error) + getVarint() (int64, error) + getArrayLength() (int, error) + getBool() (bool, error) + + // Collections + getBytes() ([]byte, error) + getVarintBytes() ([]byte, error) + getRawBytes(length int) ([]byte, error) + getString() (string, error) + getNullableString() (*string, error) + getInt32Array() ([]int32, error) + getInt64Array() ([]int64, error) + getStringArray() ([]string, error) + + // Subsets + remaining() int + getSubset(length int) (packetDecoder, error) + peek(offset, length int) (packetDecoder, error) // similar to getSubset, but it doesn't advance the offset + + // Stacks, see PushDecoder + push(in pushDecoder) error + pop() error +} + +// PushDecoder is the interface for decoding fields like CRCs and lengths where the validity +// of the field depends on what is after it in the packet. Start them with PacketDecoder.Push() where +// the actual value is located in the packet, then PacketDecoder.Pop() them when all the bytes they +// depend upon have been decoded. +type pushDecoder interface { + // Saves the offset into the input buffer as the location to actually read the calculated value when able. + saveOffset(in int) + + // Returns the length of data to reserve for the input of this encoder (eg 4 bytes for a CRC32). + reserveLength() int + + // Indicates that all required data is now available to calculate and check the field. + // SaveOffset is guaranteed to have been called first. The implementation should read ReserveLength() bytes + // of data from the saved offset, and verify it based on the data between the saved offset and curOffset. + check(curOffset int, buf []byte) error +} + +// dynamicPushDecoder extends the interface of pushDecoder for uses cases where the length of the +// fields itself is unknown until its value was decoded (for instance varint encoded length +// fields). 
+// During push, dynamicPushDecoder.decode() method will be called instead of reserveLength() +type dynamicPushDecoder interface { + pushDecoder + decoder +} diff --git a/vendor/github.com/Shopify/sarama/packet_encoder.go b/vendor/github.com/Shopify/sarama/packet_encoder.go new file mode 100644 index 00000000000..67b8daed829 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/packet_encoder.go @@ -0,0 +1,65 @@ +package sarama + +import "github.com/rcrowley/go-metrics" + +// PacketEncoder is the interface providing helpers for writing with Kafka's encoding rules. +// Types implementing Encoder only need to worry about calling methods like PutString, +// not about how a string is represented in Kafka. +type packetEncoder interface { + // Primitives + putInt8(in int8) + putInt16(in int16) + putInt32(in int32) + putInt64(in int64) + putVarint(in int64) + putArrayLength(in int) error + putBool(in bool) + + // Collections + putBytes(in []byte) error + putVarintBytes(in []byte) error + putRawBytes(in []byte) error + putString(in string) error + putNullableString(in *string) error + putStringArray(in []string) error + putInt32Array(in []int32) error + putInt64Array(in []int64) error + + // Provide the current offset to record the batch size metric + offset() int + + // Stacks, see PushEncoder + push(in pushEncoder) + pop() error + + // To record metrics when provided + metricRegistry() metrics.Registry +} + +// PushEncoder is the interface for encoding fields like CRCs and lengths where the value +// of the field depends on what is encoded after it in the packet. Start them with PacketEncoder.Push() where +// the actual value is located in the packet, then PacketEncoder.Pop() them when all the bytes they +// depend upon have been written. +type pushEncoder interface { + // Saves the offset into the input buffer as the location to actually write the calculated value when able. 
+ saveOffset(in int) + + // Returns the length of data to reserve for the output of this encoder (eg 4 bytes for a CRC32). + reserveLength() int + + // Indicates that all required data is now available to calculate and write the field. + // SaveOffset is guaranteed to have been called first. The implementation should write ReserveLength() bytes + // of data to the saved offset, based on the data between the saved offset and curOffset. + run(curOffset int, buf []byte) error +} + +// dynamicPushEncoder extends the interface of pushEncoder for uses cases where the length of the +// fields itself is unknown until its value was computed (for instance varint encoded length +// fields). +type dynamicPushEncoder interface { + pushEncoder + + // Called during pop() to adjust the length of the field. + // It should return the difference in bytes between the last computed length and current length. + adjustLength(currOffset int) int +} diff --git a/vendor/github.com/Shopify/sarama/partitioner.go b/vendor/github.com/Shopify/sarama/partitioner.go new file mode 100644 index 00000000000..6a708e729ee --- /dev/null +++ b/vendor/github.com/Shopify/sarama/partitioner.go @@ -0,0 +1,217 @@ +package sarama + +import ( + "hash" + "hash/fnv" + "math/rand" + "time" +) + +// Partitioner is anything that, given a Kafka message and a number of partitions indexed [0...numPartitions-1], +// decides to which partition to send the message. RandomPartitioner, RoundRobinPartitioner and HashPartitioner are provided +// as simple default implementations. +type Partitioner interface { + // Partition takes a message and partition count and chooses a partition + Partition(message *ProducerMessage, numPartitions int32) (int32, error) + + // RequiresConsistency indicates to the user of the partitioner whether the + // mapping of key->partition is consistent or not. 
Specifically, if a + // partitioner requires consistency then it must be allowed to choose from all + // partitions (even ones known to be unavailable), and its choice must be + // respected by the caller. The obvious example is the HashPartitioner. + RequiresConsistency() bool +} + +// DynamicConsistencyPartitioner can optionally be implemented by Partitioners +// in order to allow more flexibility than is originally allowed by the +// RequiresConsistency method in the Partitioner interface. This allows +// partitioners to require consistency sometimes, but not all times. It's useful +// for, e.g., the HashPartitioner, which does not require consistency if the +// message key is nil. +type DynamicConsistencyPartitioner interface { + Partitioner + + // MessageRequiresConsistency is similar to Partitioner.RequiresConsistency, + // but takes in the message being partitioned so that the partitioner can + // make a per-message determination. + MessageRequiresConsistency(message *ProducerMessage) bool +} + +// PartitionerConstructor is the type for a function capable of constructing new Partitioners. 
+type PartitionerConstructor func(topic string) Partitioner + +type manualPartitioner struct{} + +// HashPartitionOption lets you modify default values of the partitioner +type HashPartitionerOption func(*hashPartitioner) + +// WithAbsFirst means that the partitioner handles absolute values +// in the same way as the reference Java implementation +func WithAbsFirst() HashPartitionerOption { + return func(hp *hashPartitioner) { + hp.referenceAbs = true + } +} + +// WithCustomHashFunction lets you specify what hash function to use for the partitioning +func WithCustomHashFunction(hasher func() hash.Hash32) HashPartitionerOption { + return func(hp *hashPartitioner) { + hp.hasher = hasher() + } +} + +// WithCustomFallbackPartitioner lets you specify what HashPartitioner should be used in case a Distribution Key is empty +func WithCustomFallbackPartitioner(randomHP *hashPartitioner) HashPartitionerOption { + return func(hp *hashPartitioner) { + hp.random = hp + } +} + +// NewManualPartitioner returns a Partitioner which uses the partition manually set in the provided +// ProducerMessage's Partition field as the partition to produce to. +func NewManualPartitioner(topic string) Partitioner { + return new(manualPartitioner) +} + +func (p *manualPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) { + return message.Partition, nil +} + +func (p *manualPartitioner) RequiresConsistency() bool { + return true +} + +type randomPartitioner struct { + generator *rand.Rand +} + +// NewRandomPartitioner returns a Partitioner which chooses a random partition each time. 
+func NewRandomPartitioner(topic string) Partitioner { + p := new(randomPartitioner) + p.generator = rand.New(rand.NewSource(time.Now().UTC().UnixNano())) + return p +} + +func (p *randomPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) { + return int32(p.generator.Intn(int(numPartitions))), nil +} + +func (p *randomPartitioner) RequiresConsistency() bool { + return false +} + +type roundRobinPartitioner struct { + partition int32 +} + +// NewRoundRobinPartitioner returns a Partitioner which walks through the available partitions one at a time. +func NewRoundRobinPartitioner(topic string) Partitioner { + return &roundRobinPartitioner{} +} + +func (p *roundRobinPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) { + if p.partition >= numPartitions { + p.partition = 0 + } + ret := p.partition + p.partition++ + return ret, nil +} + +func (p *roundRobinPartitioner) RequiresConsistency() bool { + return false +} + +type hashPartitioner struct { + random Partitioner + hasher hash.Hash32 + referenceAbs bool +} + +// NewCustomHashPartitioner is a wrapper around NewHashPartitioner, allowing the use of custom hasher. +// The argument is a function providing the instance, implementing the hash.Hash32 interface. This is to ensure that +// each partition dispatcher gets its own hasher, to avoid concurrency issues by sharing an instance. 
+func NewCustomHashPartitioner(hasher func() hash.Hash32) PartitionerConstructor { + return func(topic string) Partitioner { + p := new(hashPartitioner) + p.random = NewRandomPartitioner(topic) + p.hasher = hasher() + p.referenceAbs = false + return p + } +} + +// NewCustomPartitioner creates a default Partitioner but lets you specify the behavior of each component via options +func NewCustomPartitioner(options ...HashPartitionerOption) PartitionerConstructor { + return func(topic string) Partitioner { + p := new(hashPartitioner) + p.random = NewRandomPartitioner(topic) + p.hasher = fnv.New32a() + p.referenceAbs = false + for _, option := range options { + option(p) + } + return p + } +} + +// NewHashPartitioner returns a Partitioner which behaves as follows. If the message's key is nil then a +// random partition is chosen. Otherwise the FNV-1a hash of the encoded bytes of the message key is used, +// modulus the number of partitions. This ensures that messages with the same key always end up on the +// same partition. +func NewHashPartitioner(topic string) Partitioner { + p := new(hashPartitioner) + p.random = NewRandomPartitioner(topic) + p.hasher = fnv.New32a() + p.referenceAbs = false + return p +} + +// NewReferenceHashPartitioner is like NewHashPartitioner except that it handles absolute values +// in the same way as the reference Java implementation. NewHashPartitioner was supposed to do +// that but it had a mistake and now there are people depending on both behaviours. This will +// all go away on the next major version bump. 
+func NewReferenceHashPartitioner(topic string) Partitioner { + p := new(hashPartitioner) + p.random = NewRandomPartitioner(topic) + p.hasher = fnv.New32a() + p.referenceAbs = true + return p +} + +func (p *hashPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) { + if message.Key == nil { + return p.random.Partition(message, numPartitions) + } + bytes, err := message.Key.Encode() + if err != nil { + return -1, err + } + p.hasher.Reset() + _, err = p.hasher.Write(bytes) + if err != nil { + return -1, err + } + var partition int32 + // Turns out we were doing our absolute value in a subtly different way from the upstream + // implementation, but now we need to maintain backwards compat for people who started using + // the old version; if referenceAbs is set we are compatible with the reference java client + // but not past Sarama versions + if p.referenceAbs { + partition = (int32(p.hasher.Sum32()) & 0x7fffffff) % numPartitions + } else { + partition = int32(p.hasher.Sum32()) % numPartitions + if partition < 0 { + partition = -partition + } + } + return partition, nil +} + +func (p *hashPartitioner) RequiresConsistency() bool { + return true +} + +func (p *hashPartitioner) MessageRequiresConsistency(message *ProducerMessage) bool { + return message.Key != nil +} diff --git a/vendor/github.com/Shopify/sarama/prep_encoder.go b/vendor/github.com/Shopify/sarama/prep_encoder.go new file mode 100644 index 00000000000..b633cd15111 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/prep_encoder.go @@ -0,0 +1,153 @@ +package sarama + +import ( + "encoding/binary" + "fmt" + "math" + + "github.com/rcrowley/go-metrics" +) + +type prepEncoder struct { + stack []pushEncoder + length int +} + +// primitives + +func (pe *prepEncoder) putInt8(in int8) { + pe.length++ +} + +func (pe *prepEncoder) putInt16(in int16) { + pe.length += 2 +} + +func (pe *prepEncoder) putInt32(in int32) { + pe.length += 4 +} + +func (pe *prepEncoder) putInt64(in int64) { 
+ pe.length += 8 +} + +func (pe *prepEncoder) putVarint(in int64) { + var buf [binary.MaxVarintLen64]byte + pe.length += binary.PutVarint(buf[:], in) +} + +func (pe *prepEncoder) putArrayLength(in int) error { + if in > math.MaxInt32 { + return PacketEncodingError{fmt.Sprintf("array too long (%d)", in)} + } + pe.length += 4 + return nil +} + +func (pe *prepEncoder) putBool(in bool) { + pe.length++ +} + +// arrays + +func (pe *prepEncoder) putBytes(in []byte) error { + pe.length += 4 + if in == nil { + return nil + } + return pe.putRawBytes(in) +} + +func (pe *prepEncoder) putVarintBytes(in []byte) error { + if in == nil { + pe.putVarint(-1) + return nil + } + pe.putVarint(int64(len(in))) + return pe.putRawBytes(in) +} + +func (pe *prepEncoder) putRawBytes(in []byte) error { + if len(in) > math.MaxInt32 { + return PacketEncodingError{fmt.Sprintf("byteslice too long (%d)", len(in))} + } + pe.length += len(in) + return nil +} + +func (pe *prepEncoder) putNullableString(in *string) error { + if in == nil { + pe.length += 2 + return nil + } + return pe.putString(*in) +} + +func (pe *prepEncoder) putString(in string) error { + pe.length += 2 + if len(in) > math.MaxInt16 { + return PacketEncodingError{fmt.Sprintf("string too long (%d)", len(in))} + } + pe.length += len(in) + return nil +} + +func (pe *prepEncoder) putStringArray(in []string) error { + err := pe.putArrayLength(len(in)) + if err != nil { + return err + } + + for _, str := range in { + if err := pe.putString(str); err != nil { + return err + } + } + + return nil +} + +func (pe *prepEncoder) putInt32Array(in []int32) error { + err := pe.putArrayLength(len(in)) + if err != nil { + return err + } + pe.length += 4 * len(in) + return nil +} + +func (pe *prepEncoder) putInt64Array(in []int64) error { + err := pe.putArrayLength(len(in)) + if err != nil { + return err + } + pe.length += 8 * len(in) + return nil +} + +func (pe *prepEncoder) offset() int { + return pe.length +} + +// stackable + +func (pe 
*prepEncoder) push(in pushEncoder) { + in.saveOffset(pe.length) + pe.length += in.reserveLength() + pe.stack = append(pe.stack, in) +} + +func (pe *prepEncoder) pop() error { + in := pe.stack[len(pe.stack)-1] + pe.stack = pe.stack[:len(pe.stack)-1] + if dpe, ok := in.(dynamicPushEncoder); ok { + pe.length += dpe.adjustLength(pe.length) + } + + return nil +} + +// we do not record metrics during the prep encoder pass +func (pe *prepEncoder) metricRegistry() metrics.Registry { + return nil +} diff --git a/vendor/github.com/Shopify/sarama/produce_request.go b/vendor/github.com/Shopify/sarama/produce_request.go new file mode 100644 index 00000000000..0c755d02b64 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/produce_request.go @@ -0,0 +1,252 @@ +package sarama + +import "github.com/rcrowley/go-metrics" + +// RequiredAcks is used in Produce Requests to tell the broker how many replica acknowledgements +// it must see before responding. Any of the constants defined here are valid. On broker versions +// prior to 0.8.2.0 any other positive int16 is also valid (the broker will wait for that many +// acknowledgements) but in 0.8.2.0 and later this will raise an exception (it has been replaced +// by setting the `min.isr` value in the brokers configuration). +type RequiredAcks int16 + +const ( + // NoResponse doesn't send any response, the TCP ACK is all you get. + NoResponse RequiredAcks = 0 + // WaitForLocal waits for only the local commit to succeed before responding. + WaitForLocal RequiredAcks = 1 + // WaitForAll waits for all in-sync replicas to commit before responding. + // The minimum number of in-sync replicas is configured on the broker via + // the `min.insync.replicas` configuration key. 
+ WaitForAll RequiredAcks = -1 +) + +type ProduceRequest struct { + TransactionalID *string + RequiredAcks RequiredAcks + Timeout int32 + Version int16 // v1 requires Kafka 0.9, v2 requires Kafka 0.10, v3 requires Kafka 0.11 + records map[string]map[int32]Records +} + +func updateMsgSetMetrics(msgSet *MessageSet, compressionRatioMetric metrics.Histogram, + topicCompressionRatioMetric metrics.Histogram) int64 { + var topicRecordCount int64 + for _, messageBlock := range msgSet.Messages { + // Is this a fake "message" wrapping real messages? + if messageBlock.Msg.Set != nil { + topicRecordCount += int64(len(messageBlock.Msg.Set.Messages)) + } else { + // A single uncompressed message + topicRecordCount++ + } + // Better be safe than sorry when computing the compression ratio + if messageBlock.Msg.compressedSize != 0 { + compressionRatio := float64(len(messageBlock.Msg.Value)) / + float64(messageBlock.Msg.compressedSize) + // Histogram do not support decimal values, let's multiple it by 100 for better precision + intCompressionRatio := int64(100 * compressionRatio) + compressionRatioMetric.Update(intCompressionRatio) + topicCompressionRatioMetric.Update(intCompressionRatio) + } + } + return topicRecordCount +} + +func updateBatchMetrics(recordBatch *RecordBatch, compressionRatioMetric metrics.Histogram, + topicCompressionRatioMetric metrics.Histogram) int64 { + if recordBatch.compressedRecords != nil { + compressionRatio := int64(float64(recordBatch.recordsLen) / float64(len(recordBatch.compressedRecords)) * 100) + compressionRatioMetric.Update(compressionRatio) + topicCompressionRatioMetric.Update(compressionRatio) + } + + return int64(len(recordBatch.Records)) +} + +func (r *ProduceRequest) encode(pe packetEncoder) error { + if r.Version >= 3 { + if err := pe.putNullableString(r.TransactionalID); err != nil { + return err + } + } + pe.putInt16(int16(r.RequiredAcks)) + pe.putInt32(r.Timeout) + metricRegistry := pe.metricRegistry() + var batchSizeMetric 
metrics.Histogram + var compressionRatioMetric metrics.Histogram + if metricRegistry != nil { + batchSizeMetric = getOrRegisterHistogram("batch-size", metricRegistry) + compressionRatioMetric = getOrRegisterHistogram("compression-ratio", metricRegistry) + } + totalRecordCount := int64(0) + + err := pe.putArrayLength(len(r.records)) + if err != nil { + return err + } + + for topic, partitions := range r.records { + err = pe.putString(topic) + if err != nil { + return err + } + err = pe.putArrayLength(len(partitions)) + if err != nil { + return err + } + topicRecordCount := int64(0) + var topicCompressionRatioMetric metrics.Histogram + if metricRegistry != nil { + topicCompressionRatioMetric = getOrRegisterTopicHistogram("compression-ratio", topic, metricRegistry) + } + for id, records := range partitions { + startOffset := pe.offset() + pe.putInt32(id) + pe.push(&lengthField{}) + err = records.encode(pe) + if err != nil { + return err + } + err = pe.pop() + if err != nil { + return err + } + if metricRegistry != nil { + if r.Version >= 3 { + topicRecordCount += updateBatchMetrics(records.RecordBatch, compressionRatioMetric, topicCompressionRatioMetric) + } else { + topicRecordCount += updateMsgSetMetrics(records.MsgSet, compressionRatioMetric, topicCompressionRatioMetric) + } + batchSize := int64(pe.offset() - startOffset) + batchSizeMetric.Update(batchSize) + getOrRegisterTopicHistogram("batch-size", topic, metricRegistry).Update(batchSize) + } + } + if topicRecordCount > 0 { + getOrRegisterTopicMeter("record-send-rate", topic, metricRegistry).Mark(topicRecordCount) + getOrRegisterTopicHistogram("records-per-request", topic, metricRegistry).Update(topicRecordCount) + totalRecordCount += topicRecordCount + } + } + if totalRecordCount > 0 { + metrics.GetOrRegisterMeter("record-send-rate", metricRegistry).Mark(totalRecordCount) + getOrRegisterHistogram("records-per-request", metricRegistry).Update(totalRecordCount) + } + + return nil +} + +func (r *ProduceRequest) 
decode(pd packetDecoder, version int16) error { + r.Version = version + + if version >= 3 { + id, err := pd.getNullableString() + if err != nil { + return err + } + r.TransactionalID = id + } + requiredAcks, err := pd.getInt16() + if err != nil { + return err + } + r.RequiredAcks = RequiredAcks(requiredAcks) + if r.Timeout, err = pd.getInt32(); err != nil { + return err + } + topicCount, err := pd.getArrayLength() + if err != nil { + return err + } + if topicCount == 0 { + return nil + } + + r.records = make(map[string]map[int32]Records) + for i := 0; i < topicCount; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + partitionCount, err := pd.getArrayLength() + if err != nil { + return err + } + r.records[topic] = make(map[int32]Records) + + for j := 0; j < partitionCount; j++ { + partition, err := pd.getInt32() + if err != nil { + return err + } + size, err := pd.getInt32() + if err != nil { + return err + } + recordsDecoder, err := pd.getSubset(int(size)) + if err != nil { + return err + } + var records Records + if err := records.decode(recordsDecoder); err != nil { + return err + } + r.records[topic][partition] = records + } + } + + return nil +} + +func (r *ProduceRequest) key() int16 { + return 0 +} + +func (r *ProduceRequest) version() int16 { + return r.Version +} + +func (r *ProduceRequest) requiredVersion() KafkaVersion { + switch r.Version { + case 1: + return V0_9_0_0 + case 2: + return V0_10_0_0 + case 3: + return V0_11_0_0 + default: + return MinVersion + } +} + +func (r *ProduceRequest) ensureRecords(topic string, partition int32) { + if r.records == nil { + r.records = make(map[string]map[int32]Records) + } + + if r.records[topic] == nil { + r.records[topic] = make(map[int32]Records) + } +} + +func (r *ProduceRequest) AddMessage(topic string, partition int32, msg *Message) { + r.ensureRecords(topic, partition) + set := r.records[topic][partition].MsgSet + + if set == nil { + set = new(MessageSet) + r.records[topic][partition] 
= newLegacyRecords(set) + } + + set.addMessage(msg) +} + +func (r *ProduceRequest) AddSet(topic string, partition int32, set *MessageSet) { + r.ensureRecords(topic, partition) + r.records[topic][partition] = newLegacyRecords(set) +} + +func (r *ProduceRequest) AddBatch(topic string, partition int32, batch *RecordBatch) { + r.ensureRecords(topic, partition) + r.records[topic][partition] = newDefaultRecords(batch) +} diff --git a/vendor/github.com/Shopify/sarama/produce_response.go b/vendor/github.com/Shopify/sarama/produce_response.go new file mode 100644 index 00000000000..667e34c661b --- /dev/null +++ b/vendor/github.com/Shopify/sarama/produce_response.go @@ -0,0 +1,183 @@ +package sarama + +import ( + "fmt" + "time" +) + +type ProduceResponseBlock struct { + Err KError + Offset int64 + // only provided if Version >= 2 and the broker is configured with `LogAppendTime` + Timestamp time.Time +} + +func (b *ProduceResponseBlock) decode(pd packetDecoder, version int16) (err error) { + tmp, err := pd.getInt16() + if err != nil { + return err + } + b.Err = KError(tmp) + + b.Offset, err = pd.getInt64() + if err != nil { + return err + } + + if version >= 2 { + if millis, err := pd.getInt64(); err != nil { + return err + } else if millis != -1 { + b.Timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond)) + } + } + + return nil +} + +func (b *ProduceResponseBlock) encode(pe packetEncoder, version int16) (err error) { + pe.putInt16(int16(b.Err)) + pe.putInt64(b.Offset) + + if version >= 2 { + timestamp := int64(-1) + if !b.Timestamp.Before(time.Unix(0, 0)) { + timestamp = b.Timestamp.UnixNano() / int64(time.Millisecond) + } else if !b.Timestamp.IsZero() { + return PacketEncodingError{fmt.Sprintf("invalid timestamp (%v)", b.Timestamp)} + } + pe.putInt64(timestamp) + } + + return nil +} + +type ProduceResponse struct { + Blocks map[string]map[int32]*ProduceResponseBlock + Version int16 + ThrottleTime time.Duration // only provided if Version >= 1 +} + +func 
(r *ProduceResponse) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + + numTopics, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Blocks = make(map[string]map[int32]*ProduceResponseBlock, numTopics) + for i := 0; i < numTopics; i++ { + name, err := pd.getString() + if err != nil { + return err + } + + numBlocks, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Blocks[name] = make(map[int32]*ProduceResponseBlock, numBlocks) + + for j := 0; j < numBlocks; j++ { + id, err := pd.getInt32() + if err != nil { + return err + } + + block := new(ProduceResponseBlock) + err = block.decode(pd, version) + if err != nil { + return err + } + r.Blocks[name][id] = block + } + } + + if r.Version >= 1 { + millis, err := pd.getInt32() + if err != nil { + return err + } + + r.ThrottleTime = time.Duration(millis) * time.Millisecond + } + + return nil +} + +func (r *ProduceResponse) encode(pe packetEncoder) error { + err := pe.putArrayLength(len(r.Blocks)) + if err != nil { + return err + } + for topic, partitions := range r.Blocks { + err = pe.putString(topic) + if err != nil { + return err + } + err = pe.putArrayLength(len(partitions)) + if err != nil { + return err + } + for id, prb := range partitions { + pe.putInt32(id) + err = prb.encode(pe, r.Version) + if err != nil { + return err + } + } + } + if r.Version >= 1 { + pe.putInt32(int32(r.ThrottleTime / time.Millisecond)) + } + return nil +} + +func (r *ProduceResponse) key() int16 { + return 0 +} + +func (r *ProduceResponse) version() int16 { + return r.Version +} + +func (r *ProduceResponse) requiredVersion() KafkaVersion { + switch r.Version { + case 1: + return V0_9_0_0 + case 2: + return V0_10_0_0 + case 3: + return V0_11_0_0 + default: + return MinVersion + } +} + +func (r *ProduceResponse) GetBlock(topic string, partition int32) *ProduceResponseBlock { + if r.Blocks == nil { + return nil + } + + if r.Blocks[topic] == nil { + return nil + } + + return 
r.Blocks[topic][partition] +} + +// Testing API + +func (r *ProduceResponse) AddTopicPartition(topic string, partition int32, err KError) { + if r.Blocks == nil { + r.Blocks = make(map[string]map[int32]*ProduceResponseBlock) + } + byTopic, ok := r.Blocks[topic] + if !ok { + byTopic = make(map[int32]*ProduceResponseBlock) + r.Blocks[topic] = byTopic + } + byTopic[partition] = &ProduceResponseBlock{Err: err} +} diff --git a/vendor/github.com/Shopify/sarama/produce_set.go b/vendor/github.com/Shopify/sarama/produce_set.go new file mode 100644 index 00000000000..13be2b3c92b --- /dev/null +++ b/vendor/github.com/Shopify/sarama/produce_set.go @@ -0,0 +1,252 @@ +package sarama + +import ( + "encoding/binary" + "time" +) + +type partitionSet struct { + msgs []*ProducerMessage + recordsToSend Records + bufferBytes int +} + +type produceSet struct { + parent *asyncProducer + msgs map[string]map[int32]*partitionSet + + bufferBytes int + bufferCount int +} + +func newProduceSet(parent *asyncProducer) *produceSet { + return &produceSet{ + msgs: make(map[string]map[int32]*partitionSet), + parent: parent, + } +} + +func (ps *produceSet) add(msg *ProducerMessage) error { + var err error + var key, val []byte + + if msg.Key != nil { + if key, err = msg.Key.Encode(); err != nil { + return err + } + } + + if msg.Value != nil { + if val, err = msg.Value.Encode(); err != nil { + return err + } + } + + timestamp := msg.Timestamp + if msg.Timestamp.IsZero() { + timestamp = time.Now() + } + + partitions := ps.msgs[msg.Topic] + if partitions == nil { + partitions = make(map[int32]*partitionSet) + ps.msgs[msg.Topic] = partitions + } + + var size int + + set := partitions[msg.Partition] + if set == nil { + if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) { + batch := &RecordBatch{ + FirstTimestamp: timestamp, + Version: 2, + ProducerID: -1, /* No producer id */ + Codec: ps.parent.conf.Producer.Compression, + CompressionLevel: ps.parent.conf.Producer.CompressionLevel, + } + set = 
&partitionSet{recordsToSend: newDefaultRecords(batch)} + size = recordBatchOverhead + } else { + set = &partitionSet{recordsToSend: newLegacyRecords(new(MessageSet))} + } + partitions[msg.Partition] = set + } + + set.msgs = append(set.msgs, msg) + if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) { + // We are being conservative here to avoid having to prep encode the record + size += maximumRecordOverhead + rec := &Record{ + Key: key, + Value: val, + TimestampDelta: timestamp.Sub(set.recordsToSend.RecordBatch.FirstTimestamp), + } + size += len(key) + len(val) + if len(msg.Headers) > 0 { + rec.Headers = make([]*RecordHeader, len(msg.Headers)) + for i := range msg.Headers { + rec.Headers[i] = &msg.Headers[i] + size += len(rec.Headers[i].Key) + len(rec.Headers[i].Value) + 2*binary.MaxVarintLen32 + } + } + set.recordsToSend.RecordBatch.addRecord(rec) + } else { + msgToSend := &Message{Codec: CompressionNone, Key: key, Value: val} + if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) { + msgToSend.Timestamp = timestamp + msgToSend.Version = 1 + } + set.recordsToSend.MsgSet.addMessage(msgToSend) + size = producerMessageOverhead + len(key) + len(val) + } + + set.bufferBytes += size + ps.bufferBytes += size + ps.bufferCount++ + + return nil +} + +func (ps *produceSet) buildRequest() *ProduceRequest { + req := &ProduceRequest{ + RequiredAcks: ps.parent.conf.Producer.RequiredAcks, + Timeout: int32(ps.parent.conf.Producer.Timeout / time.Millisecond), + } + if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) { + req.Version = 2 + } + if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) { + req.Version = 3 + } + + for topic, partitionSet := range ps.msgs { + for partition, set := range partitionSet { + if req.Version >= 3 { + // If the API version we're hitting is 3 or greater, we need to calculate + // offsets for each record in the batch relative to FirstOffset. + // Additionally, we must set LastOffsetDelta to the value of the last offset + // in the batch. 
Since the OffsetDelta of the first record is 0, we know that the + // final record of any batch will have an offset of (# of records in batch) - 1. + // (See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-Messagesets + // under the RecordBatch section for details.) + rb := set.recordsToSend.RecordBatch + if len(rb.Records) > 0 { + rb.LastOffsetDelta = int32(len(rb.Records) - 1) + for i, record := range rb.Records { + record.OffsetDelta = int64(i) + } + } + + req.AddBatch(topic, partition, rb) + continue + } + if ps.parent.conf.Producer.Compression == CompressionNone { + req.AddSet(topic, partition, set.recordsToSend.MsgSet) + } else { + // When compression is enabled, the entire set for each partition is compressed + // and sent as the payload of a single fake "message" with the appropriate codec + // set and no key. When the server sees a message with a compression codec, it + // decompresses the payload and treats the result as its message set. + + if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) { + // If our version is 0.10 or later, assign relative offsets + // to the inner messages. This lets the broker avoid + // recompressing the message set. + // (See https://cwiki.apache.org/confluence/display/KAFKA/KIP-31+-+Move+to+relative+offsets+in+compressed+message+sets + // for details on relative offsets.) + for i, msg := range set.recordsToSend.MsgSet.Messages { + msg.Offset = int64(i) + } + } + payload, err := encode(set.recordsToSend.MsgSet, ps.parent.conf.MetricRegistry) + if err != nil { + Logger.Println(err) // if this happens, it's basically our fault. 
+ panic(err) + } + compMsg := &Message{ + Codec: ps.parent.conf.Producer.Compression, + CompressionLevel: ps.parent.conf.Producer.CompressionLevel, + Key: nil, + Value: payload, + Set: set.recordsToSend.MsgSet, // Provide the underlying message set for accurate metrics + } + if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) { + compMsg.Version = 1 + compMsg.Timestamp = set.recordsToSend.MsgSet.Messages[0].Msg.Timestamp + } + req.AddMessage(topic, partition, compMsg) + } + } + } + + return req +} + +func (ps *produceSet) eachPartition(cb func(topic string, partition int32, msgs []*ProducerMessage)) { + for topic, partitionSet := range ps.msgs { + for partition, set := range partitionSet { + cb(topic, partition, set.msgs) + } + } +} + +func (ps *produceSet) dropPartition(topic string, partition int32) []*ProducerMessage { + if ps.msgs[topic] == nil { + return nil + } + set := ps.msgs[topic][partition] + if set == nil { + return nil + } + ps.bufferBytes -= set.bufferBytes + ps.bufferCount -= len(set.msgs) + delete(ps.msgs[topic], partition) + return set.msgs +} + +func (ps *produceSet) wouldOverflow(msg *ProducerMessage) bool { + version := 1 + if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) { + version = 2 + } + + switch { + // Would we overflow our maximum possible size-on-the-wire? 10KiB is arbitrary overhead for safety. + case ps.bufferBytes+msg.byteSize(version) >= int(MaxRequestSize-(10*1024)): + return true + // Would we overflow the size-limit of a compressed message-batch for this partition? + case ps.parent.conf.Producer.Compression != CompressionNone && + ps.msgs[msg.Topic] != nil && ps.msgs[msg.Topic][msg.Partition] != nil && + ps.msgs[msg.Topic][msg.Partition].bufferBytes+msg.byteSize(version) >= ps.parent.conf.Producer.MaxMessageBytes: + return true + // Would we overflow simply in number of messages? 
+ case ps.parent.conf.Producer.Flush.MaxMessages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.MaxMessages: + return true + default: + return false + } +} + +func (ps *produceSet) readyToFlush() bool { + switch { + // If we don't have any messages, nothing else matters + case ps.empty(): + return false + // If all three config values are 0, we always flush as-fast-as-possible + case ps.parent.conf.Producer.Flush.Frequency == 0 && ps.parent.conf.Producer.Flush.Bytes == 0 && ps.parent.conf.Producer.Flush.Messages == 0: + return true + // If we've passed the message trigger-point + case ps.parent.conf.Producer.Flush.Messages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.Messages: + return true + // If we've passed the byte trigger-point + case ps.parent.conf.Producer.Flush.Bytes > 0 && ps.bufferBytes >= ps.parent.conf.Producer.Flush.Bytes: + return true + default: + return false + } +} + +func (ps *produceSet) empty() bool { + return ps.bufferCount == 0 +} diff --git a/vendor/github.com/Shopify/sarama/real_decoder.go b/vendor/github.com/Shopify/sarama/real_decoder.go new file mode 100644 index 00000000000..23045e7d33a --- /dev/null +++ b/vendor/github.com/Shopify/sarama/real_decoder.go @@ -0,0 +1,324 @@ +package sarama + +import ( + "encoding/binary" + "math" +) + +var errInvalidArrayLength = PacketDecodingError{"invalid array length"} +var errInvalidByteSliceLength = PacketDecodingError{"invalid byteslice length"} +var errInvalidByteSliceLengthType = PacketDecodingError{"invalid byteslice length type"} +var errInvalidStringLength = PacketDecodingError{"invalid string length"} +var errInvalidSubsetSize = PacketDecodingError{"invalid subset size"} +var errVarintOverflow = PacketDecodingError{"varint overflow"} +var errInvalidBool = PacketDecodingError{"invalid bool"} + +type realDecoder struct { + raw []byte + off int + stack []pushDecoder +} + +// primitives + +func (rd *realDecoder) getInt8() (int8, error) { + if rd.remaining() < 1 { + rd.off = 
len(rd.raw) + return -1, ErrInsufficientData + } + tmp := int8(rd.raw[rd.off]) + rd.off++ + return tmp, nil +} + +func (rd *realDecoder) getInt16() (int16, error) { + if rd.remaining() < 2 { + rd.off = len(rd.raw) + return -1, ErrInsufficientData + } + tmp := int16(binary.BigEndian.Uint16(rd.raw[rd.off:])) + rd.off += 2 + return tmp, nil +} + +func (rd *realDecoder) getInt32() (int32, error) { + if rd.remaining() < 4 { + rd.off = len(rd.raw) + return -1, ErrInsufficientData + } + tmp := int32(binary.BigEndian.Uint32(rd.raw[rd.off:])) + rd.off += 4 + return tmp, nil +} + +func (rd *realDecoder) getInt64() (int64, error) { + if rd.remaining() < 8 { + rd.off = len(rd.raw) + return -1, ErrInsufficientData + } + tmp := int64(binary.BigEndian.Uint64(rd.raw[rd.off:])) + rd.off += 8 + return tmp, nil +} + +func (rd *realDecoder) getVarint() (int64, error) { + tmp, n := binary.Varint(rd.raw[rd.off:]) + if n == 0 { + rd.off = len(rd.raw) + return -1, ErrInsufficientData + } + if n < 0 { + rd.off -= n + return -1, errVarintOverflow + } + rd.off += n + return tmp, nil +} + +func (rd *realDecoder) getArrayLength() (int, error) { + if rd.remaining() < 4 { + rd.off = len(rd.raw) + return -1, ErrInsufficientData + } + tmp := int(int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))) + rd.off += 4 + if tmp > rd.remaining() { + rd.off = len(rd.raw) + return -1, ErrInsufficientData + } else if tmp > 2*math.MaxUint16 { + return -1, errInvalidArrayLength + } + return tmp, nil +} + +func (rd *realDecoder) getBool() (bool, error) { + b, err := rd.getInt8() + if err != nil || b == 0 { + return false, err + } + if b != 1 { + return false, errInvalidBool + } + return true, nil +} + +// collections + +func (rd *realDecoder) getBytes() ([]byte, error) { + tmp, err := rd.getInt32() + if err != nil { + return nil, err + } + if tmp == -1 { + return nil, nil + } + + return rd.getRawBytes(int(tmp)) +} + +func (rd *realDecoder) getVarintBytes() ([]byte, error) { + tmp, err := rd.getVarint() + if err != 
nil { + return nil, err + } + if tmp == -1 { + return nil, nil + } + + return rd.getRawBytes(int(tmp)) +} + +func (rd *realDecoder) getStringLength() (int, error) { + length, err := rd.getInt16() + if err != nil { + return 0, err + } + + n := int(length) + + switch { + case n < -1: + return 0, errInvalidStringLength + case n > rd.remaining(): + rd.off = len(rd.raw) + return 0, ErrInsufficientData + } + + return n, nil +} + +func (rd *realDecoder) getString() (string, error) { + n, err := rd.getStringLength() + if err != nil || n == -1 { + return "", err + } + + tmpStr := string(rd.raw[rd.off : rd.off+n]) + rd.off += n + return tmpStr, nil +} + +func (rd *realDecoder) getNullableString() (*string, error) { + n, err := rd.getStringLength() + if err != nil || n == -1 { + return nil, err + } + + tmpStr := string(rd.raw[rd.off : rd.off+n]) + rd.off += n + return &tmpStr, err +} + +func (rd *realDecoder) getInt32Array() ([]int32, error) { + if rd.remaining() < 4 { + rd.off = len(rd.raw) + return nil, ErrInsufficientData + } + n := int(binary.BigEndian.Uint32(rd.raw[rd.off:])) + rd.off += 4 + + if rd.remaining() < 4*n { + rd.off = len(rd.raw) + return nil, ErrInsufficientData + } + + if n == 0 { + return nil, nil + } + + if n < 0 { + return nil, errInvalidArrayLength + } + + ret := make([]int32, n) + for i := range ret { + ret[i] = int32(binary.BigEndian.Uint32(rd.raw[rd.off:])) + rd.off += 4 + } + return ret, nil +} + +func (rd *realDecoder) getInt64Array() ([]int64, error) { + if rd.remaining() < 4 { + rd.off = len(rd.raw) + return nil, ErrInsufficientData + } + n := int(binary.BigEndian.Uint32(rd.raw[rd.off:])) + rd.off += 4 + + if rd.remaining() < 8*n { + rd.off = len(rd.raw) + return nil, ErrInsufficientData + } + + if n == 0 { + return nil, nil + } + + if n < 0 { + return nil, errInvalidArrayLength + } + + ret := make([]int64, n) + for i := range ret { + ret[i] = int64(binary.BigEndian.Uint64(rd.raw[rd.off:])) + rd.off += 8 + } + return ret, nil +} + +func (rd 
*realDecoder) getStringArray() ([]string, error) { + if rd.remaining() < 4 { + rd.off = len(rd.raw) + return nil, ErrInsufficientData + } + n := int(binary.BigEndian.Uint32(rd.raw[rd.off:])) + rd.off += 4 + + if n == 0 { + return nil, nil + } + + if n < 0 { + return nil, errInvalidArrayLength + } + + ret := make([]string, n) + for i := range ret { + str, err := rd.getString() + if err != nil { + return nil, err + } + + ret[i] = str + } + return ret, nil +} + +// subsets + +func (rd *realDecoder) remaining() int { + return len(rd.raw) - rd.off +} + +func (rd *realDecoder) getSubset(length int) (packetDecoder, error) { + buf, err := rd.getRawBytes(length) + if err != nil { + return nil, err + } + return &realDecoder{raw: buf}, nil +} + +func (rd *realDecoder) getRawBytes(length int) ([]byte, error) { + if length < 0 { + return nil, errInvalidByteSliceLength + } else if length > rd.remaining() { + rd.off = len(rd.raw) + return nil, ErrInsufficientData + } + + start := rd.off + rd.off += length + return rd.raw[start:rd.off], nil +} + +func (rd *realDecoder) peek(offset, length int) (packetDecoder, error) { + if rd.remaining() < offset+length { + return nil, ErrInsufficientData + } + off := rd.off + offset + return &realDecoder{raw: rd.raw[off : off+length]}, nil +} + +// stacks + +func (rd *realDecoder) push(in pushDecoder) error { + in.saveOffset(rd.off) + + var reserve int + if dpd, ok := in.(dynamicPushDecoder); ok { + if err := dpd.decode(rd); err != nil { + return err + } + } else { + reserve = in.reserveLength() + if rd.remaining() < reserve { + rd.off = len(rd.raw) + return ErrInsufficientData + } + } + + rd.stack = append(rd.stack, in) + + rd.off += reserve + + return nil +} + +func (rd *realDecoder) pop() error { + // this is go's ugly pop pattern (the inverse of append) + in := rd.stack[len(rd.stack)-1] + rd.stack = rd.stack[:len(rd.stack)-1] + + return in.check(rd.off, rd.raw) +} diff --git a/vendor/github.com/Shopify/sarama/real_encoder.go 
b/vendor/github.com/Shopify/sarama/real_encoder.go new file mode 100644 index 00000000000..3c75387f779 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/real_encoder.go @@ -0,0 +1,156 @@ +package sarama + +import ( + "encoding/binary" + + "github.com/rcrowley/go-metrics" +) + +type realEncoder struct { + raw []byte + off int + stack []pushEncoder + registry metrics.Registry +} + +// primitives + +func (re *realEncoder) putInt8(in int8) { + re.raw[re.off] = byte(in) + re.off++ +} + +func (re *realEncoder) putInt16(in int16) { + binary.BigEndian.PutUint16(re.raw[re.off:], uint16(in)) + re.off += 2 +} + +func (re *realEncoder) putInt32(in int32) { + binary.BigEndian.PutUint32(re.raw[re.off:], uint32(in)) + re.off += 4 +} + +func (re *realEncoder) putInt64(in int64) { + binary.BigEndian.PutUint64(re.raw[re.off:], uint64(in)) + re.off += 8 +} + +func (re *realEncoder) putVarint(in int64) { + re.off += binary.PutVarint(re.raw[re.off:], in) +} + +func (re *realEncoder) putArrayLength(in int) error { + re.putInt32(int32(in)) + return nil +} + +func (re *realEncoder) putBool(in bool) { + if in { + re.putInt8(1) + return + } + re.putInt8(0) +} + +// collection + +func (re *realEncoder) putRawBytes(in []byte) error { + copy(re.raw[re.off:], in) + re.off += len(in) + return nil +} + +func (re *realEncoder) putBytes(in []byte) error { + if in == nil { + re.putInt32(-1) + return nil + } + re.putInt32(int32(len(in))) + return re.putRawBytes(in) +} + +func (re *realEncoder) putVarintBytes(in []byte) error { + if in == nil { + re.putVarint(-1) + return nil + } + re.putVarint(int64(len(in))) + return re.putRawBytes(in) +} + +func (re *realEncoder) putString(in string) error { + re.putInt16(int16(len(in))) + copy(re.raw[re.off:], in) + re.off += len(in) + return nil +} + +func (re *realEncoder) putNullableString(in *string) error { + if in == nil { + re.putInt16(-1) + return nil + } + return re.putString(*in) +} + +func (re *realEncoder) putStringArray(in []string) error { + err 
:= re.putArrayLength(len(in)) + if err != nil { + return err + } + + for _, val := range in { + if err := re.putString(val); err != nil { + return err + } + } + + return nil +} + +func (re *realEncoder) putInt32Array(in []int32) error { + err := re.putArrayLength(len(in)) + if err != nil { + return err + } + for _, val := range in { + re.putInt32(val) + } + return nil +} + +func (re *realEncoder) putInt64Array(in []int64) error { + err := re.putArrayLength(len(in)) + if err != nil { + return err + } + for _, val := range in { + re.putInt64(val) + } + return nil +} + +func (re *realEncoder) offset() int { + return re.off +} + +// stacks + +func (re *realEncoder) push(in pushEncoder) { + in.saveOffset(re.off) + re.off += in.reserveLength() + re.stack = append(re.stack, in) +} + +func (re *realEncoder) pop() error { + // this is go's ugly pop pattern (the inverse of append) + in := re.stack[len(re.stack)-1] + re.stack = re.stack[:len(re.stack)-1] + + return in.run(re.off, re.raw) +} + +// we do record metrics during the real encoder pass +func (re *realEncoder) metricRegistry() metrics.Registry { + return re.registry +} diff --git a/vendor/github.com/Shopify/sarama/record.go b/vendor/github.com/Shopify/sarama/record.go new file mode 100644 index 00000000000..cded308cf0f --- /dev/null +++ b/vendor/github.com/Shopify/sarama/record.go @@ -0,0 +1,113 @@ +package sarama + +import ( + "encoding/binary" + "time" +) + +const ( + controlMask = 0x20 + maximumRecordOverhead = 5*binary.MaxVarintLen32 + binary.MaxVarintLen64 + 1 +) + +type RecordHeader struct { + Key []byte + Value []byte +} + +func (h *RecordHeader) encode(pe packetEncoder) error { + if err := pe.putVarintBytes(h.Key); err != nil { + return err + } + return pe.putVarintBytes(h.Value) +} + +func (h *RecordHeader) decode(pd packetDecoder) (err error) { + if h.Key, err = pd.getVarintBytes(); err != nil { + return err + } + + if h.Value, err = pd.getVarintBytes(); err != nil { + return err + } + return nil +} + +type 
Record struct { + Attributes int8 + TimestampDelta time.Duration + OffsetDelta int64 + Key []byte + Value []byte + Headers []*RecordHeader + + length varintLengthField +} + +func (r *Record) encode(pe packetEncoder) error { + pe.push(&r.length) + pe.putInt8(r.Attributes) + pe.putVarint(int64(r.TimestampDelta / time.Millisecond)) + pe.putVarint(r.OffsetDelta) + if err := pe.putVarintBytes(r.Key); err != nil { + return err + } + if err := pe.putVarintBytes(r.Value); err != nil { + return err + } + pe.putVarint(int64(len(r.Headers))) + + for _, h := range r.Headers { + if err := h.encode(pe); err != nil { + return err + } + } + + return pe.pop() +} + +func (r *Record) decode(pd packetDecoder) (err error) { + if err = pd.push(&r.length); err != nil { + return err + } + + if r.Attributes, err = pd.getInt8(); err != nil { + return err + } + + timestamp, err := pd.getVarint() + if err != nil { + return err + } + r.TimestampDelta = time.Duration(timestamp) * time.Millisecond + + if r.OffsetDelta, err = pd.getVarint(); err != nil { + return err + } + + if r.Key, err = pd.getVarintBytes(); err != nil { + return err + } + + if r.Value, err = pd.getVarintBytes(); err != nil { + return err + } + + numHeaders, err := pd.getVarint() + if err != nil { + return err + } + + if numHeaders >= 0 { + r.Headers = make([]*RecordHeader, numHeaders) + } + for i := int64(0); i < numHeaders; i++ { + hdr := new(RecordHeader) + if err := hdr.decode(pd); err != nil { + return err + } + r.Headers[i] = hdr + } + + return pd.pop() +} diff --git a/vendor/github.com/Shopify/sarama/record_batch.go b/vendor/github.com/Shopify/sarama/record_batch.go new file mode 100644 index 00000000000..845318aa341 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/record_batch.go @@ -0,0 +1,268 @@ +package sarama + +import ( + "bytes" + "compress/gzip" + "fmt" + "io/ioutil" + "time" + + "github.com/eapache/go-xerial-snappy" + "github.com/pierrec/lz4" +) + +const recordBatchOverhead = 49 + +type recordsArray 
[]*Record + +func (e recordsArray) encode(pe packetEncoder) error { + for _, r := range e { + if err := r.encode(pe); err != nil { + return err + } + } + return nil +} + +func (e recordsArray) decode(pd packetDecoder) error { + for i := range e { + rec := &Record{} + if err := rec.decode(pd); err != nil { + return err + } + e[i] = rec + } + return nil +} + +type RecordBatch struct { + FirstOffset int64 + PartitionLeaderEpoch int32 + Version int8 + Codec CompressionCodec + CompressionLevel int + Control bool + LastOffsetDelta int32 + FirstTimestamp time.Time + MaxTimestamp time.Time + ProducerID int64 + ProducerEpoch int16 + FirstSequence int32 + Records []*Record + PartialTrailingRecord bool + + compressedRecords []byte + recordsLen int // uncompressed records size +} + +func (b *RecordBatch) encode(pe packetEncoder) error { + if b.Version != 2 { + return PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", b.Codec)} + } + pe.putInt64(b.FirstOffset) + pe.push(&lengthField{}) + pe.putInt32(b.PartitionLeaderEpoch) + pe.putInt8(b.Version) + pe.push(newCRC32Field(crcCastagnoli)) + pe.putInt16(b.computeAttributes()) + pe.putInt32(b.LastOffsetDelta) + + if err := (Timestamp{&b.FirstTimestamp}).encode(pe); err != nil { + return err + } + + if err := (Timestamp{&b.MaxTimestamp}).encode(pe); err != nil { + return err + } + + pe.putInt64(b.ProducerID) + pe.putInt16(b.ProducerEpoch) + pe.putInt32(b.FirstSequence) + + if err := pe.putArrayLength(len(b.Records)); err != nil { + return err + } + + if b.compressedRecords == nil { + if err := b.encodeRecords(pe); err != nil { + return err + } + } + if err := pe.putRawBytes(b.compressedRecords); err != nil { + return err + } + + if err := pe.pop(); err != nil { + return err + } + return pe.pop() +} + +func (b *RecordBatch) decode(pd packetDecoder) (err error) { + if b.FirstOffset, err = pd.getInt64(); err != nil { + return err + } + + batchLen, err := pd.getInt32() + if err != nil { + return err + } + + if 
b.PartitionLeaderEpoch, err = pd.getInt32(); err != nil { + return err + } + + if b.Version, err = pd.getInt8(); err != nil { + return err + } + + if err = pd.push(&crc32Field{polynomial: crcCastagnoli}); err != nil { + return err + } + + attributes, err := pd.getInt16() + if err != nil { + return err + } + b.Codec = CompressionCodec(int8(attributes) & compressionCodecMask) + b.Control = attributes&controlMask == controlMask + + if b.LastOffsetDelta, err = pd.getInt32(); err != nil { + return err + } + + if err = (Timestamp{&b.FirstTimestamp}).decode(pd); err != nil { + return err + } + + if err = (Timestamp{&b.MaxTimestamp}).decode(pd); err != nil { + return err + } + + if b.ProducerID, err = pd.getInt64(); err != nil { + return err + } + + if b.ProducerEpoch, err = pd.getInt16(); err != nil { + return err + } + + if b.FirstSequence, err = pd.getInt32(); err != nil { + return err + } + + numRecs, err := pd.getArrayLength() + if err != nil { + return err + } + if numRecs >= 0 { + b.Records = make([]*Record, numRecs) + } + + bufSize := int(batchLen) - recordBatchOverhead + recBuffer, err := pd.getRawBytes(bufSize) + if err != nil { + if err == ErrInsufficientData { + b.PartialTrailingRecord = true + b.Records = nil + return nil + } + return err + } + + if err = pd.pop(); err != nil { + return err + } + + switch b.Codec { + case CompressionNone: + case CompressionGZIP: + reader, err := gzip.NewReader(bytes.NewReader(recBuffer)) + if err != nil { + return err + } + if recBuffer, err = ioutil.ReadAll(reader); err != nil { + return err + } + case CompressionSnappy: + if recBuffer, err = snappy.Decode(recBuffer); err != nil { + return err + } + case CompressionLZ4: + reader := lz4.NewReader(bytes.NewReader(recBuffer)) + if recBuffer, err = ioutil.ReadAll(reader); err != nil { + return err + } + default: + return PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", b.Codec)} + } + + b.recordsLen = len(recBuffer) + err = decode(recBuffer, 
recordsArray(b.Records)) + if err == ErrInsufficientData { + b.PartialTrailingRecord = true + b.Records = nil + return nil + } + return err +} + +func (b *RecordBatch) encodeRecords(pe packetEncoder) error { + var raw []byte + var err error + if raw, err = encode(recordsArray(b.Records), pe.metricRegistry()); err != nil { + return err + } + b.recordsLen = len(raw) + + switch b.Codec { + case CompressionNone: + b.compressedRecords = raw + case CompressionGZIP: + var buf bytes.Buffer + var writer *gzip.Writer + if b.CompressionLevel != CompressionLevelDefault { + writer, err = gzip.NewWriterLevel(&buf, b.CompressionLevel) + if err != nil { + return err + } + } else { + writer = gzip.NewWriter(&buf) + } + if _, err := writer.Write(raw); err != nil { + return err + } + if err := writer.Close(); err != nil { + return err + } + b.compressedRecords = buf.Bytes() + case CompressionSnappy: + b.compressedRecords = snappy.Encode(raw) + case CompressionLZ4: + var buf bytes.Buffer + writer := lz4.NewWriter(&buf) + if _, err := writer.Write(raw); err != nil { + return err + } + if err := writer.Close(); err != nil { + return err + } + b.compressedRecords = buf.Bytes() + default: + return PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", b.Codec)} + } + + return nil +} + +func (b *RecordBatch) computeAttributes() int16 { + attr := int16(b.Codec) & int16(compressionCodecMask) + if b.Control { + attr |= controlMask + } + return attr +} + +func (b *RecordBatch) addRecord(r *Record) { + b.Records = append(b.Records, r) +} diff --git a/vendor/github.com/Shopify/sarama/request.go b/vendor/github.com/Shopify/sarama/request.go new file mode 100644 index 00000000000..4d211a14f17 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/request.go @@ -0,0 +1,149 @@ +package sarama + +import ( + "encoding/binary" + "fmt" + "io" +) + +type protocolBody interface { + encoder + versionedDecoder + key() int16 + version() int16 + requiredVersion() KafkaVersion +} + +type request 
struct { + correlationID int32 + clientID string + body protocolBody +} + +func (r *request) encode(pe packetEncoder) (err error) { + pe.push(&lengthField{}) + pe.putInt16(r.body.key()) + pe.putInt16(r.body.version()) + pe.putInt32(r.correlationID) + err = pe.putString(r.clientID) + if err != nil { + return err + } + err = r.body.encode(pe) + if err != nil { + return err + } + return pe.pop() +} + +func (r *request) decode(pd packetDecoder) (err error) { + var key int16 + if key, err = pd.getInt16(); err != nil { + return err + } + var version int16 + if version, err = pd.getInt16(); err != nil { + return err + } + if r.correlationID, err = pd.getInt32(); err != nil { + return err + } + r.clientID, err = pd.getString() + + r.body = allocateBody(key, version) + if r.body == nil { + return PacketDecodingError{fmt.Sprintf("unknown request key (%d)", key)} + } + return r.body.decode(pd, version) +} + +func decodeRequest(r io.Reader) (req *request, bytesRead int, err error) { + lengthBytes := make([]byte, 4) + if _, err := io.ReadFull(r, lengthBytes); err != nil { + return nil, bytesRead, err + } + bytesRead += len(lengthBytes) + + length := int32(binary.BigEndian.Uint32(lengthBytes)) + if length <= 4 || length > MaxRequestSize { + return nil, bytesRead, PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", length)} + } + + encodedReq := make([]byte, length) + if _, err := io.ReadFull(r, encodedReq); err != nil { + return nil, bytesRead, err + } + bytesRead += len(encodedReq) + + req = &request{} + if err := decode(encodedReq, req); err != nil { + return nil, bytesRead, err + } + return req, bytesRead, nil +} + +func allocateBody(key, version int16) protocolBody { + switch key { + case 0: + return &ProduceRequest{} + case 1: + return &FetchRequest{} + case 2: + return &OffsetRequest{Version: version} + case 3: + return &MetadataRequest{} + case 8: + return &OffsetCommitRequest{Version: version} + case 9: + return &OffsetFetchRequest{} + case 10: 
+ return &FindCoordinatorRequest{} + case 11: + return &JoinGroupRequest{} + case 12: + return &HeartbeatRequest{} + case 13: + return &LeaveGroupRequest{} + case 14: + return &SyncGroupRequest{} + case 15: + return &DescribeGroupsRequest{} + case 16: + return &ListGroupsRequest{} + case 17: + return &SaslHandshakeRequest{} + case 18: + return &ApiVersionsRequest{} + case 19: + return &CreateTopicsRequest{} + case 20: + return &DeleteTopicsRequest{} + case 21: + return &DeleteRecordsRequest{} + case 22: + return &InitProducerIDRequest{} + case 24: + return &AddPartitionsToTxnRequest{} + case 25: + return &AddOffsetsToTxnRequest{} + case 26: + return &EndTxnRequest{} + case 28: + return &TxnOffsetCommitRequest{} + case 29: + return &DescribeAclsRequest{} + case 30: + return &CreateAclsRequest{} + case 31: + return &DeleteAclsRequest{} + case 32: + return &DescribeConfigsRequest{} + case 33: + return &AlterConfigsRequest{} + case 37: + return &CreatePartitionsRequest{} + case 42: + return &DeleteGroupsRequest{} + } + return nil +} diff --git a/vendor/github.com/Shopify/sarama/response_header.go b/vendor/github.com/Shopify/sarama/response_header.go new file mode 100644 index 00000000000..f3f4d27d6c4 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/response_header.go @@ -0,0 +1,21 @@ +package sarama + +import "fmt" + +type responseHeader struct { + length int32 + correlationID int32 +} + +func (r *responseHeader) decode(pd packetDecoder) (err error) { + r.length, err = pd.getInt32() + if err != nil { + return err + } + if r.length <= 4 || r.length > MaxResponseSize { + return PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", r.length)} + } + + r.correlationID, err = pd.getInt32() + return err +} diff --git a/vendor/github.com/Shopify/sarama/sarama.go b/vendor/github.com/Shopify/sarama/sarama.go new file mode 100644 index 00000000000..7d5dc60d3e2 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/sarama.go @@ -0,0 +1,99 @@ +/* +Package 
sarama is a pure Go client library for dealing with Apache Kafka (versions 0.8 and later). It includes a high-level +API for easily producing and consuming messages, and a low-level API for controlling bytes on the wire when the high-level +API is insufficient. Usage examples for the high-level APIs are provided inline with their full documentation. + +To produce messages, use either the AsyncProducer or the SyncProducer. The AsyncProducer accepts messages on a channel +and produces them asynchronously in the background as efficiently as possible; it is preferred in most cases. +The SyncProducer provides a method which will block until Kafka acknowledges the message as produced. This can be +useful but comes with two caveats: it will generally be less efficient, and the actual durability guarantees +depend on the configured value of `Producer.RequiredAcks`. There are configurations where a message acknowledged by the +SyncProducer can still sometimes be lost. + +To consume messages, use the Consumer. Note that Sarama's Consumer implementation does not currently support automatic +consumer-group rebalancing and offset tracking. For Zookeeper-based tracking (Kafka 0.8.2 and earlier), the +https://github.com/wvanbergen/kafka library builds on Sarama to add this support. For Kafka-based tracking (Kafka 0.9 +and later), the https://github.com/bsm/sarama-cluster library builds on Sarama to add this support. + +For lower-level needs, the Broker and Request/Response objects permit precise control over each connection +and message sent on the wire; the Client provides higher-level metadata management that is shared between +the producers and the consumer. The Request/Response objects and properties are mostly undocumented, as they line up +exactly with the protocol fields documented by Kafka at +https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol + +Metrics are exposed through https://github.com/rcrowley/go-metrics library in a local registry. 
+ +Broker related metrics: + + +----------------------------------------------+------------+---------------------------------------------------------------+ + | Name | Type | Description | + +----------------------------------------------+------------+---------------------------------------------------------------+ + | incoming-byte-rate | meter | Bytes/second read off all brokers | + | incoming-byte-rate-for-broker- | meter | Bytes/second read off a given broker | + | outgoing-byte-rate | meter | Bytes/second written off all brokers | + | outgoing-byte-rate-for-broker- | meter | Bytes/second written off a given broker | + | request-rate | meter | Requests/second sent to all brokers | + | request-rate-for-broker- | meter | Requests/second sent to a given broker | + | request-size | histogram | Distribution of the request size in bytes for all brokers | + | request-size-for-broker- | histogram | Distribution of the request size in bytes for a given broker | + | request-latency-in-ms | histogram | Distribution of the request latency in ms for all brokers | + | request-latency-in-ms-for-broker- | histogram | Distribution of the request latency in ms for a given broker | + | response-rate | meter | Responses/second received from all brokers | + | response-rate-for-broker- | meter | Responses/second received from a given broker | + | response-size | histogram | Distribution of the response size in bytes for all brokers | + | response-size-for-broker- | histogram | Distribution of the response size in bytes for a given broker | + +----------------------------------------------+------------+---------------------------------------------------------------+ + +Note that we do not gather specific metrics for seed brokers but they are part of the "all brokers" metrics. 
+ +Producer related metrics: + + +-------------------------------------------+------------+--------------------------------------------------------------------------------------+ + | Name | Type | Description | + +-------------------------------------------+------------+--------------------------------------------------------------------------------------+ + | batch-size | histogram | Distribution of the number of bytes sent per partition per request for all topics | + | batch-size-for-topic- | histogram | Distribution of the number of bytes sent per partition per request for a given topic | + | record-send-rate | meter | Records/second sent to all topics | + | record-send-rate-for-topic- | meter | Records/second sent to a given topic | + | records-per-request | histogram | Distribution of the number of records sent per request for all topics | + | records-per-request-for-topic- | histogram | Distribution of the number of records sent per request for a given topic | + | compression-ratio | histogram | Distribution of the compression ratio times 100 of record batches for all topics | + | compression-ratio-for-topic- | histogram | Distribution of the compression ratio times 100 of record batches for a given topic | + +-------------------------------------------+------------+--------------------------------------------------------------------------------------+ + +*/ +package sarama + +import ( + "io/ioutil" + "log" +) + +// Logger is the instance of a StdLogger interface that Sarama writes connection +// management events to. By default it is set to discard all log messages via ioutil.Discard, +// but you can set it to redirect wherever you want. +var Logger StdLogger = log.New(ioutil.Discard, "[Sarama] ", log.LstdFlags) + +// StdLogger is used to log error messages. 
+type StdLogger interface { + Print(v ...interface{}) + Printf(format string, v ...interface{}) + Println(v ...interface{}) +} + +// PanicHandler is called for recovering from panics spawned internally to the library (and thus +// not recoverable by the caller's goroutine). Defaults to nil, which means panics are not recovered. +var PanicHandler func(interface{}) + +// MaxRequestSize is the maximum size (in bytes) of any request that Sarama will attempt to send. Trying +// to send a request larger than this will result in an PacketEncodingError. The default of 100 MiB is aligned +// with Kafka's default `socket.request.max.bytes`, which is the largest request the broker will attempt +// to process. +var MaxRequestSize int32 = 100 * 1024 * 1024 + +// MaxResponseSize is the maximum size (in bytes) of any response that Sarama will attempt to parse. If +// a broker returns a response message larger than this value, Sarama will return a PacketDecodingError to +// protect the client from running out of memory. Please note that brokers do not have any natural limit on +// the size of responses they send. In particular, they can send arbitrarily large fetch responses to consumers +// (see https://issues.apache.org/jira/browse/KAFKA-2063). 
+var MaxResponseSize int32 = 100 * 1024 * 1024 diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_request.go b/vendor/github.com/Shopify/sarama/sasl_handshake_request.go new file mode 100644 index 00000000000..fbbc8947b2e --- /dev/null +++ b/vendor/github.com/Shopify/sarama/sasl_handshake_request.go @@ -0,0 +1,33 @@ +package sarama + +type SaslHandshakeRequest struct { + Mechanism string +} + +func (r *SaslHandshakeRequest) encode(pe packetEncoder) error { + if err := pe.putString(r.Mechanism); err != nil { + return err + } + + return nil +} + +func (r *SaslHandshakeRequest) decode(pd packetDecoder, version int16) (err error) { + if r.Mechanism, err = pd.getString(); err != nil { + return err + } + + return nil +} + +func (r *SaslHandshakeRequest) key() int16 { + return 17 +} + +func (r *SaslHandshakeRequest) version() int16 { + return 0 +} + +func (r *SaslHandshakeRequest) requiredVersion() KafkaVersion { + return V0_10_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_response.go b/vendor/github.com/Shopify/sarama/sasl_handshake_response.go new file mode 100644 index 00000000000..ef290d4bc6d --- /dev/null +++ b/vendor/github.com/Shopify/sarama/sasl_handshake_response.go @@ -0,0 +1,38 @@ +package sarama + +type SaslHandshakeResponse struct { + Err KError + EnabledMechanisms []string +} + +func (r *SaslHandshakeResponse) encode(pe packetEncoder) error { + pe.putInt16(int16(r.Err)) + return pe.putStringArray(r.EnabledMechanisms) +} + +func (r *SaslHandshakeResponse) decode(pd packetDecoder, version int16) error { + kerr, err := pd.getInt16() + if err != nil { + return err + } + + r.Err = KError(kerr) + + if r.EnabledMechanisms, err = pd.getStringArray(); err != nil { + return err + } + + return nil +} + +func (r *SaslHandshakeResponse) key() int16 { + return 17 +} + +func (r *SaslHandshakeResponse) version() int16 { + return 0 +} + +func (r *SaslHandshakeResponse) requiredVersion() KafkaVersion { + return V0_10_0_0 +} diff --git 
a/vendor/github.com/Shopify/sarama/sync_group_request.go b/vendor/github.com/Shopify/sarama/sync_group_request.go new file mode 100644 index 00000000000..fe207080e03 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/sync_group_request.go @@ -0,0 +1,100 @@ +package sarama + +type SyncGroupRequest struct { + GroupId string + GenerationId int32 + MemberId string + GroupAssignments map[string][]byte +} + +func (r *SyncGroupRequest) encode(pe packetEncoder) error { + if err := pe.putString(r.GroupId); err != nil { + return err + } + + pe.putInt32(r.GenerationId) + + if err := pe.putString(r.MemberId); err != nil { + return err + } + + if err := pe.putArrayLength(len(r.GroupAssignments)); err != nil { + return err + } + for memberId, memberAssignment := range r.GroupAssignments { + if err := pe.putString(memberId); err != nil { + return err + } + if err := pe.putBytes(memberAssignment); err != nil { + return err + } + } + + return nil +} + +func (r *SyncGroupRequest) decode(pd packetDecoder, version int16) (err error) { + if r.GroupId, err = pd.getString(); err != nil { + return + } + if r.GenerationId, err = pd.getInt32(); err != nil { + return + } + if r.MemberId, err = pd.getString(); err != nil { + return + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + if n == 0 { + return nil + } + + r.GroupAssignments = make(map[string][]byte) + for i := 0; i < n; i++ { + memberId, err := pd.getString() + if err != nil { + return err + } + memberAssignment, err := pd.getBytes() + if err != nil { + return err + } + + r.GroupAssignments[memberId] = memberAssignment + } + + return nil +} + +func (r *SyncGroupRequest) key() int16 { + return 14 +} + +func (r *SyncGroupRequest) version() int16 { + return 0 +} + +func (r *SyncGroupRequest) requiredVersion() KafkaVersion { + return V0_9_0_0 +} + +func (r *SyncGroupRequest) AddGroupAssignment(memberId string, memberAssignment []byte) { + if r.GroupAssignments == nil { + r.GroupAssignments = 
make(map[string][]byte) + } + + r.GroupAssignments[memberId] = memberAssignment +} + +func (r *SyncGroupRequest) AddGroupAssignmentMember(memberId string, memberAssignment *ConsumerGroupMemberAssignment) error { + bin, err := encode(memberAssignment, nil) + if err != nil { + return err + } + + r.AddGroupAssignment(memberId, bin) + return nil +} diff --git a/vendor/github.com/Shopify/sarama/sync_group_response.go b/vendor/github.com/Shopify/sarama/sync_group_response.go new file mode 100644 index 00000000000..194b382b4ab --- /dev/null +++ b/vendor/github.com/Shopify/sarama/sync_group_response.go @@ -0,0 +1,41 @@ +package sarama + +type SyncGroupResponse struct { + Err KError + MemberAssignment []byte +} + +func (r *SyncGroupResponse) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) { + assignment := new(ConsumerGroupMemberAssignment) + err := decode(r.MemberAssignment, assignment) + return assignment, err +} + +func (r *SyncGroupResponse) encode(pe packetEncoder) error { + pe.putInt16(int16(r.Err)) + return pe.putBytes(r.MemberAssignment) +} + +func (r *SyncGroupResponse) decode(pd packetDecoder, version int16) (err error) { + kerr, err := pd.getInt16() + if err != nil { + return err + } + + r.Err = KError(kerr) + + r.MemberAssignment, err = pd.getBytes() + return +} + +func (r *SyncGroupResponse) key() int16 { + return 14 +} + +func (r *SyncGroupResponse) version() int16 { + return 0 +} + +func (r *SyncGroupResponse) requiredVersion() KafkaVersion { + return V0_9_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/sync_producer.go b/vendor/github.com/Shopify/sarama/sync_producer.go new file mode 100644 index 00000000000..021c5a01032 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/sync_producer.go @@ -0,0 +1,149 @@ +package sarama + +import "sync" + +// SyncProducer publishes Kafka messages, blocking until they have been acknowledged. It routes messages to the correct +// broker, refreshing metadata as appropriate, and parses responses for errors. 
You must call Close() on a producer +// to avoid leaks, it may not be garbage-collected automatically when it passes out of scope. +// +// The SyncProducer comes with two caveats: it will generally be less efficient than the AsyncProducer, and the actual +// durability guarantee provided when a message is acknowledged depend on the configured value of `Producer.RequiredAcks`. +// There are configurations where a message acknowledged by the SyncProducer can still sometimes be lost. +// +// For implementation reasons, the SyncProducer requires `Producer.Return.Errors` and `Producer.Return.Successes` to +// be set to true in its configuration. +type SyncProducer interface { + + // SendMessage produces a given message, and returns only when it either has + // succeeded or failed to produce. It will return the partition and the offset + // of the produced message, or an error if the message failed to produce. + SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error) + + // SendMessages produces a given set of messages, and returns only when all + // messages in the set have either succeeded or failed. Note that messages + // can succeed and fail individually; if some succeed and some fail, + // SendMessages will return an error. + SendMessages(msgs []*ProducerMessage) error + + // Close shuts down the producer and waits for any buffered messages to be + // flushed. You must call this function before a producer object passes out of + // scope, as it may otherwise leak memory. You must call this before calling + // Close on the underlying client. + Close() error +} + +type syncProducer struct { + producer *asyncProducer + wg sync.WaitGroup +} + +// NewSyncProducer creates a new SyncProducer using the given broker addresses and configuration. 
+func NewSyncProducer(addrs []string, config *Config) (SyncProducer, error) { + if config == nil { + config = NewConfig() + config.Producer.Return.Successes = true + } + + if err := verifyProducerConfig(config); err != nil { + return nil, err + } + + p, err := NewAsyncProducer(addrs, config) + if err != nil { + return nil, err + } + return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil +} + +// NewSyncProducerFromClient creates a new SyncProducer using the given client. It is still +// necessary to call Close() on the underlying client when shutting down this producer. +func NewSyncProducerFromClient(client Client) (SyncProducer, error) { + if err := verifyProducerConfig(client.Config()); err != nil { + return nil, err + } + + p, err := NewAsyncProducerFromClient(client) + if err != nil { + return nil, err + } + return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil +} + +func newSyncProducerFromAsyncProducer(p *asyncProducer) *syncProducer { + sp := &syncProducer{producer: p} + + sp.wg.Add(2) + go withRecover(sp.handleSuccesses) + go withRecover(sp.handleErrors) + + return sp +} + +func verifyProducerConfig(config *Config) error { + if !config.Producer.Return.Errors { + return ConfigurationError("Producer.Return.Errors must be true to be used in a SyncProducer") + } + if !config.Producer.Return.Successes { + return ConfigurationError("Producer.Return.Successes must be true to be used in a SyncProducer") + } + return nil +} + +func (sp *syncProducer) SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error) { + expectation := make(chan *ProducerError, 1) + msg.expectation = expectation + sp.producer.Input() <- msg + + if err := <-expectation; err != nil { + return -1, -1, err.Err + } + + return msg.Partition, msg.Offset, nil +} + +func (sp *syncProducer) SendMessages(msgs []*ProducerMessage) error { + expectations := make(chan chan *ProducerError, len(msgs)) + go func() { + for _, msg := range msgs { + expectation := 
make(chan *ProducerError, 1) + msg.expectation = expectation + sp.producer.Input() <- msg + expectations <- expectation + } + close(expectations) + }() + + var errors ProducerErrors + for expectation := range expectations { + if err := <-expectation; err != nil { + errors = append(errors, err) + } + } + + if len(errors) > 0 { + return errors + } + return nil +} + +func (sp *syncProducer) handleSuccesses() { + defer sp.wg.Done() + for msg := range sp.producer.Successes() { + expectation := msg.expectation + expectation <- nil + } +} + +func (sp *syncProducer) handleErrors() { + defer sp.wg.Done() + for err := range sp.producer.Errors() { + expectation := err.Msg.expectation + expectation <- err + } +} + +func (sp *syncProducer) Close() error { + sp.producer.AsyncClose() + sp.wg.Wait() + return nil +} diff --git a/vendor/github.com/Shopify/sarama/timestamp.go b/vendor/github.com/Shopify/sarama/timestamp.go new file mode 100644 index 00000000000..372278d0bfa --- /dev/null +++ b/vendor/github.com/Shopify/sarama/timestamp.go @@ -0,0 +1,40 @@ +package sarama + +import ( + "fmt" + "time" +) + +type Timestamp struct { + *time.Time +} + +func (t Timestamp) encode(pe packetEncoder) error { + timestamp := int64(-1) + + if !t.Before(time.Unix(0, 0)) { + timestamp = t.UnixNano() / int64(time.Millisecond) + } else if !t.IsZero() { + return PacketEncodingError{fmt.Sprintf("invalid timestamp (%v)", t)} + } + + pe.putInt64(timestamp) + return nil +} + +func (t Timestamp) decode(pd packetDecoder) error { + millis, err := pd.getInt64() + if err != nil { + return err + } + + // negative timestamps are invalid, in these cases we should return + // a zero time + timestamp := time.Time{} + if millis >= 0 { + timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond)) + } + + *t.Time = timestamp + return nil +} diff --git a/vendor/github.com/Shopify/sarama/txn_offset_commit_request.go b/vendor/github.com/Shopify/sarama/txn_offset_commit_request.go new file mode 100644 index 
00000000000..71e95b814cb --- /dev/null +++ b/vendor/github.com/Shopify/sarama/txn_offset_commit_request.go @@ -0,0 +1,126 @@ +package sarama + +type TxnOffsetCommitRequest struct { + TransactionalID string + GroupID string + ProducerID int64 + ProducerEpoch int16 + Topics map[string][]*PartitionOffsetMetadata +} + +func (t *TxnOffsetCommitRequest) encode(pe packetEncoder) error { + if err := pe.putString(t.TransactionalID); err != nil { + return err + } + if err := pe.putString(t.GroupID); err != nil { + return err + } + pe.putInt64(t.ProducerID) + pe.putInt16(t.ProducerEpoch) + + if err := pe.putArrayLength(len(t.Topics)); err != nil { + return err + } + for topic, partitions := range t.Topics { + if err := pe.putString(topic); err != nil { + return err + } + if err := pe.putArrayLength(len(partitions)); err != nil { + return err + } + for _, partition := range partitions { + if err := partition.encode(pe); err != nil { + return err + } + } + } + + return nil +} + +func (t *TxnOffsetCommitRequest) decode(pd packetDecoder, version int16) (err error) { + if t.TransactionalID, err = pd.getString(); err != nil { + return err + } + if t.GroupID, err = pd.getString(); err != nil { + return err + } + if t.ProducerID, err = pd.getInt64(); err != nil { + return err + } + if t.ProducerEpoch, err = pd.getInt16(); err != nil { + return err + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + t.Topics = make(map[string][]*PartitionOffsetMetadata) + for i := 0; i < n; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + + m, err := pd.getArrayLength() + if err != nil { + return err + } + + t.Topics[topic] = make([]*PartitionOffsetMetadata, m) + + for j := 0; j < m; j++ { + partitionOffsetMetadata := new(PartitionOffsetMetadata) + if err := partitionOffsetMetadata.decode(pd, version); err != nil { + return err + } + t.Topics[topic][j] = partitionOffsetMetadata + } + } + + return nil +} + +func (a *TxnOffsetCommitRequest) key() int16 { 
+ return 28 +} + +func (a *TxnOffsetCommitRequest) version() int16 { + return 0 +} + +func (a *TxnOffsetCommitRequest) requiredVersion() KafkaVersion { + return V0_11_0_0 +} + +type PartitionOffsetMetadata struct { + Partition int32 + Offset int64 + Metadata *string +} + +func (p *PartitionOffsetMetadata) encode(pe packetEncoder) error { + pe.putInt32(p.Partition) + pe.putInt64(p.Offset) + if err := pe.putNullableString(p.Metadata); err != nil { + return err + } + + return nil +} + +func (p *PartitionOffsetMetadata) decode(pd packetDecoder, version int16) (err error) { + if p.Partition, err = pd.getInt32(); err != nil { + return err + } + if p.Offset, err = pd.getInt64(); err != nil { + return err + } + if p.Metadata, err = pd.getNullableString(); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/txn_offset_commit_response.go b/vendor/github.com/Shopify/sarama/txn_offset_commit_response.go new file mode 100644 index 00000000000..6c980f4066f --- /dev/null +++ b/vendor/github.com/Shopify/sarama/txn_offset_commit_response.go @@ -0,0 +1,83 @@ +package sarama + +import ( + "time" +) + +type TxnOffsetCommitResponse struct { + ThrottleTime time.Duration + Topics map[string][]*PartitionError +} + +func (t *TxnOffsetCommitResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(t.ThrottleTime / time.Millisecond)) + if err := pe.putArrayLength(len(t.Topics)); err != nil { + return err + } + + for topic, e := range t.Topics { + if err := pe.putString(topic); err != nil { + return err + } + if err := pe.putArrayLength(len(e)); err != nil { + return err + } + for _, partitionError := range e { + if err := partitionError.encode(pe); err != nil { + return err + } + } + } + + return nil +} + +func (t *TxnOffsetCommitResponse) decode(pd packetDecoder, version int16) (err error) { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + t.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + n, err := 
pd.getArrayLength() + if err != nil { + return err + } + + t.Topics = make(map[string][]*PartitionError) + + for i := 0; i < n; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + + m, err := pd.getArrayLength() + if err != nil { + return err + } + + t.Topics[topic] = make([]*PartitionError, m) + + for j := 0; j < m; j++ { + t.Topics[topic][j] = new(PartitionError) + if err := t.Topics[topic][j].decode(pd, version); err != nil { + return err + } + } + } + + return nil +} + +func (a *TxnOffsetCommitResponse) key() int16 { + return 28 +} + +func (a *TxnOffsetCommitResponse) version() int16 { + return 0 +} + +func (a *TxnOffsetCommitResponse) requiredVersion() KafkaVersion { + return V0_11_0_0 +} diff --git a/vendor/github.com/eapache/go-resiliency/LICENSE b/vendor/github.com/eapache/go-resiliency/LICENSE new file mode 100644 index 00000000000..698a3f51397 --- /dev/null +++ b/vendor/github.com/eapache/go-resiliency/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2014 Evan Huus + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/eapache/go-resiliency/breaker/breaker.go b/vendor/github.com/eapache/go-resiliency/breaker/breaker.go new file mode 100644 index 00000000000..f88ca7248b0 --- /dev/null +++ b/vendor/github.com/eapache/go-resiliency/breaker/breaker.go @@ -0,0 +1,161 @@ +// Package breaker implements the circuit-breaker resiliency pattern for Go. +package breaker + +import ( + "errors" + "sync" + "sync/atomic" + "time" +) + +// ErrBreakerOpen is the error returned from Run() when the function is not executed +// because the breaker is currently open. +var ErrBreakerOpen = errors.New("circuit breaker is open") + +const ( + closed uint32 = iota + open + halfOpen +) + +// Breaker implements the circuit-breaker resiliency pattern +type Breaker struct { + errorThreshold, successThreshold int + timeout time.Duration + + lock sync.Mutex + state uint32 + errors, successes int + lastError time.Time +} + +// New constructs a new circuit-breaker that starts closed. +// From closed, the breaker opens if "errorThreshold" errors are seen +// without an error-free period of at least "timeout". From open, the +// breaker half-closes after "timeout". From half-open, the breaker closes +// after "successThreshold" consecutive successes, or opens on a single error. +func New(errorThreshold, successThreshold int, timeout time.Duration) *Breaker { + return &Breaker{ + errorThreshold: errorThreshold, + successThreshold: successThreshold, + timeout: timeout, + } +} + +// Run will either return ErrBreakerOpen immediately if the circuit-breaker is +// already open, or it will run the given function and pass along its return +// value. It is safe to call Run concurrently on the same Breaker. 
+func (b *Breaker) Run(work func() error) error { + state := atomic.LoadUint32(&b.state) + + if state == open { + return ErrBreakerOpen + } + + return b.doWork(state, work) +} + +// Go will either return ErrBreakerOpen immediately if the circuit-breaker is +// already open, or it will run the given function in a separate goroutine. +// If the function is run, Go will return nil immediately, and will *not* return +// the return value of the function. It is safe to call Go concurrently on the +// same Breaker. +func (b *Breaker) Go(work func() error) error { + state := atomic.LoadUint32(&b.state) + + if state == open { + return ErrBreakerOpen + } + + // errcheck complains about ignoring the error return value, but + // that's on purpose; if you want an error from a goroutine you have to + // get it over a channel or something + go b.doWork(state, work) + + return nil +} + +func (b *Breaker) doWork(state uint32, work func() error) error { + var panicValue interface{} + + result := func() error { + defer func() { + panicValue = recover() + }() + return work() + }() + + if result == nil && panicValue == nil && state == closed { + // short-circuit the normal, success path without contending + // on the lock + return nil + } + + // oh well, I guess we have to contend on the lock + b.processResult(result, panicValue) + + if panicValue != nil { + // as close as Go lets us come to a "rethrow" although unfortunately + // we lose the original panicing location + panic(panicValue) + } + + return result +} + +func (b *Breaker) processResult(result error, panicValue interface{}) { + b.lock.Lock() + defer b.lock.Unlock() + + if result == nil && panicValue == nil { + if b.state == halfOpen { + b.successes++ + if b.successes == b.successThreshold { + b.closeBreaker() + } + } + } else { + if b.errors > 0 { + expiry := b.lastError.Add(b.timeout) + if time.Now().After(expiry) { + b.errors = 0 + } + } + + switch b.state { + case closed: + b.errors++ + if b.errors == b.errorThreshold { + 
b.openBreaker() + } else { + b.lastError = time.Now() + } + case halfOpen: + b.openBreaker() + } + } +} + +func (b *Breaker) openBreaker() { + b.changeState(open) + go b.timer() +} + +func (b *Breaker) closeBreaker() { + b.changeState(closed) +} + +func (b *Breaker) timer() { + time.Sleep(b.timeout) + + b.lock.Lock() + defer b.lock.Unlock() + + b.changeState(halfOpen) +} + +func (b *Breaker) changeState(newState uint32) { + b.errors = 0 + b.successes = 0 + atomic.StoreUint32(&b.state, newState) +} diff --git a/vendor/github.com/eapache/go-xerial-snappy/LICENSE b/vendor/github.com/eapache/go-xerial-snappy/LICENSE new file mode 100644 index 00000000000..5bf3688d9e4 --- /dev/null +++ b/vendor/github.com/eapache/go-xerial-snappy/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Evan Huus + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/eapache/go-xerial-snappy/fuzz.go b/vendor/github.com/eapache/go-xerial-snappy/fuzz.go new file mode 100644 index 00000000000..6a46f4784e1 --- /dev/null +++ b/vendor/github.com/eapache/go-xerial-snappy/fuzz.go @@ -0,0 +1,16 @@ +// +build gofuzz + +package snappy + +func Fuzz(data []byte) int { + decode, err := Decode(data) + if decode == nil && err == nil { + panic("nil error with nil result") + } + + if err != nil { + return 0 + } + + return 1 +} diff --git a/vendor/github.com/eapache/go-xerial-snappy/snappy.go b/vendor/github.com/eapache/go-xerial-snappy/snappy.go new file mode 100644 index 00000000000..ea8f7afeb33 --- /dev/null +++ b/vendor/github.com/eapache/go-xerial-snappy/snappy.go @@ -0,0 +1,131 @@ +package snappy + +import ( + "bytes" + "encoding/binary" + "errors" + + master "github.com/golang/snappy" +) + +const ( + sizeOffset = 16 + sizeBytes = 4 +) + +var ( + xerialHeader = []byte{130, 83, 78, 65, 80, 80, 89, 0} + + // This is xerial version 1 and minimally compatible with version 1 + xerialVersionInfo = []byte{0, 0, 0, 1, 0, 0, 0, 1} + + // ErrMalformed is returned by the decoder when the xerial framing + // is malformed + ErrMalformed = errors.New("malformed xerial framing") +) + +func min(x, y int) int { + if x < y { + return x + } + return y +} + +// Encode encodes data as snappy with no framing header. +func Encode(src []byte) []byte { + return master.Encode(nil, src) +} + +// EncodeStream *appends* to the specified 'dst' the compressed +// 'src' in xerial framing format. If 'dst' does not have enough +// capacity, then a new slice will be allocated. If 'dst' has +// non-zero length, then if *must* have been built using this function. +func EncodeStream(dst, src []byte) []byte { + if len(dst) == 0 { + dst = append(dst, xerialHeader...) + dst = append(dst, xerialVersionInfo...) 
+ } + + // Snappy encode in blocks of maximum 32KB + var ( + max = len(src) + blockSize = 32 * 1024 + pos = 0 + chunk []byte + ) + + for pos < max { + newPos := min(pos + blockSize, max) + chunk = master.Encode(chunk[:cap(chunk)], src[pos:newPos]) + + // First encode the compressed size (big-endian) + // Put* panics if the buffer is too small, so pad 4 bytes first + origLen := len(dst) + dst = append(dst, dst[0:4]...) + binary.BigEndian.PutUint32(dst[origLen:], uint32(len(chunk))) + + // And now the compressed data + dst = append(dst, chunk...) + pos = newPos + } + return dst +} + +// Decode decodes snappy data whether it is traditional unframed +// or includes the xerial framing format. +func Decode(src []byte) ([]byte, error) { + return DecodeInto(nil, src) +} + +// DecodeInto decodes snappy data whether it is traditional unframed +// or includes the xerial framing format into the specified `dst`. +// It is assumed that the entirety of `dst` including all capacity is available +// for use by this function. If `dst` is nil *or* insufficiently large to hold +// the decoded `src`, new space will be allocated. 
+func DecodeInto(dst, src []byte) ([]byte, error) { + var max = len(src) + if max < len(xerialHeader) { + return nil, ErrMalformed + } + + if !bytes.Equal(src[:8], xerialHeader) { + return master.Decode(dst[:cap(dst)], src) + } + + if max < sizeOffset+sizeBytes { + return nil, ErrMalformed + } + + if dst == nil { + dst = make([]byte, 0, len(src)) + } + + dst = dst[:0] + var ( + pos = sizeOffset + chunk []byte + err error + ) + + for pos+sizeBytes <= max { + size := int(binary.BigEndian.Uint32(src[pos : pos+sizeBytes])) + pos += sizeBytes + + nextPos := pos + size + // On architectures where int is 32-bytes wide size + pos could + // overflow so we need to check the low bound as well as the + // high + if nextPos < pos || nextPos > max { + return nil, ErrMalformed + } + + chunk, err = master.Decode(chunk[:cap(chunk)], src[pos:nextPos]) + + if err != nil { + return nil, err + } + pos = nextPos + dst = append(dst, chunk...) + } + return dst, nil +} diff --git a/vendor/github.com/eapache/queue/LICENSE b/vendor/github.com/eapache/queue/LICENSE new file mode 100644 index 00000000000..d5f36dbcaaf --- /dev/null +++ b/vendor/github.com/eapache/queue/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Evan Huus + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/eapache/queue/queue.go b/vendor/github.com/eapache/queue/queue.go new file mode 100644 index 00000000000..71d1acdf27b --- /dev/null +++ b/vendor/github.com/eapache/queue/queue.go @@ -0,0 +1,102 @@ +/* +Package queue provides a fast, ring-buffer queue based on the version suggested by Dariusz Górecki. +Using this instead of other, simpler, queue implementations (slice+append or linked list) provides +substantial memory and time benefits, and fewer GC pauses. + +The queue implemented here is as fast as it is for an additional reason: it is *not* thread-safe. +*/ +package queue + +// minQueueLen is smallest capacity that queue may have. +// Must be power of 2 for bitwise modulus: x % n == x & (n - 1). +const minQueueLen = 16 + +// Queue represents a single instance of the queue data structure. +type Queue struct { + buf []interface{} + head, tail, count int +} + +// New constructs and returns a new Queue. +func New() *Queue { + return &Queue{ + buf: make([]interface{}, minQueueLen), + } +} + +// Length returns the number of elements currently stored in the queue. 
+func (q *Queue) Length() int { + return q.count +} + +// resizes the queue to fit exactly twice its current contents +// this can result in shrinking if the queue is less than half-full +func (q *Queue) resize() { + newBuf := make([]interface{}, q.count<<1) + + if q.tail > q.head { + copy(newBuf, q.buf[q.head:q.tail]) + } else { + n := copy(newBuf, q.buf[q.head:]) + copy(newBuf[n:], q.buf[:q.tail]) + } + + q.head = 0 + q.tail = q.count + q.buf = newBuf +} + +// Add puts an element on the end of the queue. +func (q *Queue) Add(elem interface{}) { + if q.count == len(q.buf) { + q.resize() + } + + q.buf[q.tail] = elem + // bitwise modulus + q.tail = (q.tail + 1) & (len(q.buf) - 1) + q.count++ +} + +// Peek returns the element at the head of the queue. This call panics +// if the queue is empty. +func (q *Queue) Peek() interface{} { + if q.count <= 0 { + panic("queue: Peek() called on empty queue") + } + return q.buf[q.head] +} + +// Get returns the element at index i in the queue. If the index is +// invalid, the call will panic. This method accepts both positive and +// negative index values. Index 0 refers to the first element, and +// index -1 refers to the last. +func (q *Queue) Get(i int) interface{} { + // If indexing backwards, convert to positive index. + if i < 0 { + i += q.count + } + if i < 0 || i >= q.count { + panic("queue: Get() called with index out of range") + } + // bitwise modulus + return q.buf[(q.head+i)&(len(q.buf)-1)] +} + +// Remove removes and returns the element from the front of the queue. If the +// queue is empty, the call will panic. +func (q *Queue) Remove() interface{} { + if q.count <= 0 { + panic("queue: Remove() called on empty queue") + } + ret := q.buf[q.head] + q.buf[q.head] = nil + // bitwise modulus + q.head = (q.head + 1) & (len(q.buf) - 1) + q.count-- + // Resize down if buffer 1/4 full. 
+ if len(q.buf) > minQueueLen && (q.count<<2) == len(q.buf) { + q.resize() + } + return ret +} diff --git a/vendor/github.com/golang/snappy/AUTHORS b/vendor/github.com/golang/snappy/AUTHORS new file mode 100644 index 00000000000..bcfa19520af --- /dev/null +++ b/vendor/github.com/golang/snappy/AUTHORS @@ -0,0 +1,15 @@ +# This is the official list of Snappy-Go authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. + +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# Please keep the list sorted. + +Damian Gryski +Google Inc. +Jan Mercl <0xjnml@gmail.com> +Rodolfo Carvalho +Sebastien Binet diff --git a/vendor/github.com/golang/snappy/CONTRIBUTORS b/vendor/github.com/golang/snappy/CONTRIBUTORS new file mode 100644 index 00000000000..931ae31606f --- /dev/null +++ b/vendor/github.com/golang/snappy/CONTRIBUTORS @@ -0,0 +1,37 @@ +# This is the official list of people who can contribute +# (and typically have contributed) code to the Snappy-Go repository. +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. +# +# The submission process automatically checks to make sure +# that people submitting code are listed in this file (by email address). +# +# Names should be added to this file only after verifying that +# the individual or the individual's organization has agreed to +# the appropriate Contributor License Agreement, found here: +# +# http://code.google.com/legal/individual-cla-v1.0.html +# http://code.google.com/legal/corporate-cla-v1.0.html +# +# The agreement for individuals can be filled out on the web. 
+# +# When adding J Random Contributor's name to this file, +# either J's name or J's organization's name should be +# added to the AUTHORS file, depending on whether the +# individual or corporate CLA was used. + +# Names should be added to this file like so: +# Name + +# Please keep the list sorted. + +Damian Gryski +Jan Mercl <0xjnml@gmail.com> +Kai Backman +Marc-Antoine Ruel +Nigel Tao +Rob Pike +Rodolfo Carvalho +Russ Cox +Sebastien Binet diff --git a/vendor/github.com/golang/snappy/LICENSE b/vendor/github.com/golang/snappy/LICENSE new file mode 100644 index 00000000000..6050c10f4c8 --- /dev/null +++ b/vendor/github.com/golang/snappy/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/golang/snappy/decode.go b/vendor/github.com/golang/snappy/decode.go new file mode 100644 index 00000000000..72efb0353dd --- /dev/null +++ b/vendor/github.com/golang/snappy/decode.go @@ -0,0 +1,237 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "encoding/binary" + "errors" + "io" +) + +var ( + // ErrCorrupt reports that the input is invalid. + ErrCorrupt = errors.New("snappy: corrupt input") + // ErrTooLarge reports that the uncompressed length is too large. + ErrTooLarge = errors.New("snappy: decoded block is too large") + // ErrUnsupported reports that the input isn't supported. + ErrUnsupported = errors.New("snappy: unsupported input") + + errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") +) + +// DecodedLen returns the length of the decoded block. +func DecodedLen(src []byte) (int, error) { + v, _, err := decodedLen(src) + return v, err +} + +// decodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. 
+func decodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrTooLarge + } + return int(v), n, nil +} + +const ( + decodeErrCodeCorrupt = 1 + decodeErrCodeUnsupportedLiteralLength = 2 +) + +// Decode returns the decoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire decoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +func Decode(dst, src []byte) ([]byte, error) { + dLen, s, err := decodedLen(src) + if err != nil { + return nil, err + } + if dLen <= len(dst) { + dst = dst[:dLen] + } else { + dst = make([]byte, dLen) + } + switch decode(dst, src[s:]) { + case 0: + return dst, nil + case decodeErrCodeUnsupportedLiteralLength: + return nil, errUnsupportedLiteralLength + } + return nil, ErrCorrupt +} + +// NewReader returns a new Reader that decompresses from r, using the framing +// format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +func NewReader(r io.Reader) *Reader { + return &Reader{ + r: r, + decoded: make([]byte, maxBlockSize), + buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), + } +} + +// Reader is an io.Reader that can read Snappy-compressed bytes. +type Reader struct { + r io.Reader + err error + decoded []byte + buf []byte + // decoded[i:j] contains decoded bytes that have not yet been passed on. + i, j int + readHeader bool +} + +// Reset discards any buffered data, resets all state, and switches the Snappy +// reader to read from r. This permits reusing a Reader rather than allocating +// a new one. 
+func (r *Reader) Reset(reader io.Reader) { + r.r = reader + r.err = nil + r.i = 0 + r.j = 0 + r.readHeader = false +} + +func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrCorrupt + } + return false + } + return true +} + +// Read satisfies the io.Reader interface. +func (r *Reader) Read(p []byte) (int, error) { + if r.err != nil { + return 0, r.err + } + for { + if r.i < r.j { + n := copy(p, r.decoded[r.i:r.j]) + r.i += n + return n, nil + } + if !r.readFull(r.buf[:4], true) { + return 0, r.err + } + chunkType := r.buf[0] + if !r.readHeader { + if chunkType != chunkTypeStreamIdentifier { + r.err = ErrCorrupt + return 0, r.err + } + r.readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + if chunkLen > len(r.buf) { + r.err = ErrUnsupported + return 0, r.err + } + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return 0, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[checksumSize:] + + n, err := DecodedLen(buf) + if err != nil { + r.err = err + return 0, r.err + } + if n > len(r.decoded) { + r.err = ErrCorrupt + return 0, r.err + } + if _, err := Decode(r.decoded, buf); err != nil { + r.err = err + return 0, r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return 0, r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeUncompressedData: + // Section 4.3. Uncompressed data (chunk type 0x01). 
+ if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + buf := r.buf[:checksumSize] + if !r.readFull(buf, false) { + return 0, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. + n := chunkLen - checksumSize + if n > len(r.decoded) { + r.err = ErrCorrupt + return 0, r.err + } + if !r.readFull(r.decoded[:n], false) { + return 0, r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return 0, r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeStreamIdentifier: + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(magicBody) { + r.err = ErrCorrupt + return 0, r.err + } + if !r.readFull(r.buf[:len(magicBody)], false) { + return 0, r.err + } + for i := 0; i < len(magicBody); i++ { + if r.buf[i] != magicBody[i] { + r.err = ErrCorrupt + return 0, r.err + } + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). + r.err = ErrUnsupported + return 0, r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.readFull(r.buf[:chunkLen], false) { + return 0, r.err + } + } +} diff --git a/vendor/github.com/golang/snappy/decode_amd64.go b/vendor/github.com/golang/snappy/decode_amd64.go new file mode 100644 index 00000000000..fcd192b849e --- /dev/null +++ b/vendor/github.com/golang/snappy/decode_amd64.go @@ -0,0 +1,14 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +package snappy + +// decode has the same semantics as in decode_other.go. 
+// +//go:noescape +func decode(dst, src []byte) int diff --git a/vendor/github.com/golang/snappy/decode_amd64.s b/vendor/github.com/golang/snappy/decode_amd64.s new file mode 100644 index 00000000000..e6179f65e35 --- /dev/null +++ b/vendor/github.com/golang/snappy/decode_amd64.s @@ -0,0 +1,490 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The asm code generally follows the pure Go code in decode_other.go, except +// where marked with a "!!!". + +// func decode(dst, src []byte) int +// +// All local variables fit into registers. The non-zero stack size is only to +// spill registers and push args when issuing a CALL. The register allocation: +// - AX scratch +// - BX scratch +// - CX length or x +// - DX offset +// - SI &src[s] +// - DI &dst[d] +// + R8 dst_base +// + R9 dst_len +// + R10 dst_base + dst_len +// + R11 src_base +// + R12 src_len +// + R13 src_base + src_len +// - R14 used by doCopy +// - R15 used by doCopy +// +// The registers R8-R13 (marked with a "+") are set at the start of the +// function, and after a CALL returns, and are not otherwise modified. +// +// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI. +// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI. +TEXT ·decode(SB), NOSPLIT, $48-56 + // Initialize SI, DI and R8-R13. + MOVQ dst_base+0(FP), R8 + MOVQ dst_len+8(FP), R9 + MOVQ R8, DI + MOVQ R8, R10 + ADDQ R9, R10 + MOVQ src_base+24(FP), R11 + MOVQ src_len+32(FP), R12 + MOVQ R11, SI + MOVQ R11, R13 + ADDQ R12, R13 + +loop: + // for s < len(src) + CMPQ SI, R13 + JEQ end + + // CX = uint32(src[s]) + // + // switch src[s] & 0x03 + MOVBLZX (SI), CX + MOVL CX, BX + ANDL $3, BX + CMPL BX, $1 + JAE tagCopy + + // ---------------------------------------- + // The code below handles literal tags. 
+ + // case tagLiteral: + // x := uint32(src[s] >> 2) + // switch + SHRL $2, CX + CMPL CX, $60 + JAE tagLit60Plus + + // case x < 60: + // s++ + INCQ SI + +doLit: + // This is the end of the inner "switch", when we have a literal tag. + // + // We assume that CX == x and x fits in a uint32, where x is the variable + // used in the pure Go decode_other.go code. + + // length = int(x) + 1 + // + // Unlike the pure Go code, we don't need to check if length <= 0 because + // CX can hold 64 bits, so the increment cannot overflow. + INCQ CX + + // Prepare to check if copying length bytes will run past the end of dst or + // src. + // + // AX = len(dst) - d + // BX = len(src) - s + MOVQ R10, AX + SUBQ DI, AX + MOVQ R13, BX + SUBQ SI, BX + + // !!! Try a faster technique for short (16 or fewer bytes) copies. + // + // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { + // goto callMemmove // Fall back on calling runtime·memmove. + // } + // + // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s + // against 21 instead of 16, because it cannot assume that all of its input + // is contiguous in memory and so it needs to leave enough source bytes to + // read the next tag without refilling buffers, but Go's Decode assumes + // contiguousness (the src argument is a []byte). + CMPQ CX, $16 + JGT callMemmove + CMPQ AX, $16 + JLT callMemmove + CMPQ BX, $16 + JLT callMemmove + + // !!! Implement the copy from src to dst as a 16-byte load and store. + // (Decode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only length bytes, but that's + // OK. If the input is a valid Snappy encoding then subsequent iterations + // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a + // non-nil error), so the overrun will be ignored. + // + // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. 
This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. + MOVOU 0(SI), X0 + MOVOU X0, 0(DI) + + // d += length + // s += length + ADDQ CX, DI + ADDQ CX, SI + JMP loop + +callMemmove: + // if length > len(dst)-d || length > len(src)-s { etc } + CMPQ CX, AX + JGT errCorrupt + CMPQ CX, BX + JGT errCorrupt + + // copy(dst[d:], src[s:s+length]) + // + // This means calling runtime·memmove(&dst[d], &src[s], length), so we push + // DI, SI and CX as arguments. Coincidentally, we also need to spill those + // three registers to the stack, to save local variables across the CALL. + MOVQ DI, 0(SP) + MOVQ SI, 8(SP) + MOVQ CX, 16(SP) + MOVQ DI, 24(SP) + MOVQ SI, 32(SP) + MOVQ CX, 40(SP) + CALL runtime·memmove(SB) + + // Restore local variables: unspill registers from the stack and + // re-calculate R8-R13. + MOVQ 24(SP), DI + MOVQ 32(SP), SI + MOVQ 40(SP), CX + MOVQ dst_base+0(FP), R8 + MOVQ dst_len+8(FP), R9 + MOVQ R8, R10 + ADDQ R9, R10 + MOVQ src_base+24(FP), R11 + MOVQ src_len+32(FP), R12 + MOVQ R11, R13 + ADDQ R12, R13 + + // d += length + // s += length + ADDQ CX, DI + ADDQ CX, SI + JMP loop + +tagLit60Plus: + // !!! This fragment does the + // + // s += x - 58; if uint(s) > uint(len(src)) { etc } + // + // checks. In the asm version, we code it once instead of once per switch case. 
+ ADDQ CX, SI + SUBQ $58, SI + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // case x == 60: + CMPL CX, $61 + JEQ tagLit61 + JA tagLit62Plus + + // x = uint32(src[s-1]) + MOVBLZX -1(SI), CX + JMP doLit + +tagLit61: + // case x == 61: + // x = uint32(src[s-2]) | uint32(src[s-1])<<8 + MOVWLZX -2(SI), CX + JMP doLit + +tagLit62Plus: + CMPL CX, $62 + JA tagLit63 + + // case x == 62: + // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + MOVWLZX -3(SI), CX + MOVBLZX -1(SI), BX + SHLL $16, BX + ORL BX, CX + JMP doLit + +tagLit63: + // case x == 63: + // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + MOVL -4(SI), CX + JMP doLit + +// The code above handles literal tags. +// ---------------------------------------- +// The code below handles copy tags. + +tagCopy4: + // case tagCopy4: + // s += 5 + ADDQ $5, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // length = 1 + int(src[s-5])>>2 + SHRQ $2, CX + INCQ CX + + // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + MOVLQZX -4(SI), DX + JMP doCopy + +tagCopy2: + // case tagCopy2: + // s += 3 + ADDQ $3, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // length = 1 + int(src[s-3])>>2 + SHRQ $2, CX + INCQ CX + + // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + MOVWQZX -2(SI), DX + JMP doCopy + +tagCopy: + // We have a copy tag. 
We assume that: + // - BX == src[s] & 0x03 + // - CX == src[s] + CMPQ BX, $2 + JEQ tagCopy2 + JA tagCopy4 + + // case tagCopy1: + // s += 2 + ADDQ $2, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + MOVQ CX, DX + ANDQ $0xe0, DX + SHLQ $3, DX + MOVBQZX -1(SI), BX + ORQ BX, DX + + // length = 4 + int(src[s-2])>>2&0x7 + SHRQ $2, CX + ANDQ $7, CX + ADDQ $4, CX + +doCopy: + // This is the end of the outer "switch", when we have a copy tag. + // + // We assume that: + // - CX == length && CX > 0 + // - DX == offset + + // if offset <= 0 { etc } + CMPQ DX, $0 + JLE errCorrupt + + // if d < offset { etc } + MOVQ DI, BX + SUBQ R8, BX + CMPQ BX, DX + JLT errCorrupt + + // if length > len(dst)-d { etc } + MOVQ R10, BX + SUBQ DI, BX + CMPQ CX, BX + JGT errCorrupt + + // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length + // + // Set: + // - R14 = len(dst)-d + // - R15 = &dst[d-offset] + MOVQ R10, R14 + SUBQ DI, R14 + MOVQ DI, R15 + SUBQ DX, R15 + + // !!! Try a faster technique for short (16 or fewer bytes) forward copies. + // + // First, try using two 8-byte load/stores, similar to the doLit technique + // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is + // still OK if offset >= 8. Note that this has to be two 8-byte load/stores + // and not one 16-byte load/store, and the first store has to be before the + // second load, due to the overlap if offset is in the range [8, 16). + // + // if length > 16 || offset < 8 || len(dst)-d < 16 { + // goto slowForwardCopy + // } + // copy 16 bytes + // d += length + CMPQ CX, $16 + JGT slowForwardCopy + CMPQ DX, $8 + JLT slowForwardCopy + CMPQ R14, $16 + JLT slowForwardCopy + MOVQ 0(R15), AX + MOVQ AX, 0(DI) + MOVQ 8(R15), BX + MOVQ BX, 8(DI) + ADDQ CX, DI + JMP loop + +slowForwardCopy: + // !!! 
If the forward copy is longer than 16 bytes, or if offset < 8, we + // can still try 8-byte load stores, provided we can overrun up to 10 extra + // bytes. As above, the overrun will be fixed up by subsequent iterations + // of the outermost loop. + // + // The C++ snappy code calls this technique IncrementalCopyFastPath. Its + // commentary says: + // + // ---- + // + // The main part of this loop is a simple copy of eight bytes at a time + // until we've copied (at least) the requested amount of bytes. However, + // if d and d-offset are less than eight bytes apart (indicating a + // repeating pattern of length < 8), we first need to expand the pattern in + // order to get the correct results. For instance, if the buffer looks like + // this, with the eight-byte and patterns marked as + // intervals: + // + // abxxxxxxxxxxxx + // [------] d-offset + // [------] d + // + // a single eight-byte copy from to will repeat the pattern + // once, after which we can move two bytes without moving : + // + // ababxxxxxxxxxx + // [------] d-offset + // [------] d + // + // and repeat the exercise until the two no longer overlap. + // + // This allows us to do very well in the special case of one single byte + // repeated many times, without taking a big hit for more general cases. + // + // The worst case of extra writing past the end of the match occurs when + // offset == 1 and length == 1; the last copy will read from byte positions + // [0..7] and write to [4..11], whereas it was only supposed to write to + // position 1. Thus, ten excess bytes. + // + // ---- + // + // That "10 byte overrun" worst case is confirmed by Go's + // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy + // and finishSlowForwardCopy algorithm. + // + // if length > len(dst)-d-10 { + // goto verySlowForwardCopy + // } + SUBQ $10, R14 + CMPQ CX, R14 + JGT verySlowForwardCopy + +makeOffsetAtLeast8: + // !!! 
As above, expand the pattern so that offset >= 8 and we can use + // 8-byte load/stores. + // + // for offset < 8 { + // copy 8 bytes from dst[d-offset:] to dst[d:] + // length -= offset + // d += offset + // offset += offset + // // The two previous lines together means that d-offset, and therefore + // // R15, is unchanged. + // } + CMPQ DX, $8 + JGE fixUpSlowForwardCopy + MOVQ (R15), BX + MOVQ BX, (DI) + SUBQ DX, CX + ADDQ DX, DI + ADDQ DX, DX + JMP makeOffsetAtLeast8 + +fixUpSlowForwardCopy: + // !!! Add length (which might be negative now) to d (implied by DI being + // &dst[d]) so that d ends up at the right place when we jump back to the + // top of the loop. Before we do that, though, we save DI to AX so that, if + // length is positive, copying the remaining length bytes will write to the + // right place. + MOVQ DI, AX + ADDQ CX, DI + +finishSlowForwardCopy: + // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative + // length means that we overrun, but as above, that will be fixed up by + // subsequent iterations of the outermost loop. + CMPQ CX, $0 + JLE loop + MOVQ (R15), BX + MOVQ BX, (AX) + ADDQ $8, R15 + ADDQ $8, AX + SUBQ $8, CX + JMP finishSlowForwardCopy + +verySlowForwardCopy: + // verySlowForwardCopy is a simple implementation of forward copy. In C + // parlance, this is a do/while loop instead of a while loop, since we know + // that length > 0. In Go syntax: + // + // for { + // dst[d] = dst[d - offset] + // d++ + // length-- + // if length == 0 { + // break + // } + // } + MOVB (R15), BX + MOVB BX, (DI) + INCQ R15 + INCQ DI + DECQ CX + JNZ verySlowForwardCopy + JMP loop + +// The code above handles copy tags. +// ---------------------------------------- + +end: + // This is the end of the "for s < len(src)". 
+ // + // if d != len(dst) { etc } + CMPQ DI, R10 + JNE errCorrupt + + // return 0 + MOVQ $0, ret+48(FP) + RET + +errCorrupt: + // return decodeErrCodeCorrupt + MOVQ $1, ret+48(FP) + RET diff --git a/vendor/github.com/golang/snappy/decode_other.go b/vendor/github.com/golang/snappy/decode_other.go new file mode 100644 index 00000000000..8c9f2049bc7 --- /dev/null +++ b/vendor/github.com/golang/snappy/decode_other.go @@ -0,0 +1,101 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 appengine !gc noasm + +package snappy + +// decode writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read, and that len(dst) +// equals that length. +// +// It returns 0 on success or a decodeErrCodeXxx error code on failure. +func decode(dst, src []byte) int { + var d, s, offset, length int + for s < len(src) { + switch src[s] & 0x03 { + case tagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. 
+ return decodeErrCodeCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + length = int(x) + 1 + if length <= 0 { + return decodeErrCodeUnsupportedLiteralLength + } + if length > len(dst)-d || length > len(src)-s { + return decodeErrCodeCorrupt + } + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + + case tagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + + case tagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + } + + if offset <= 0 || d < offset || length > len(dst)-d { + return decodeErrCodeCorrupt + } + // Copy from an earlier sub-slice of dst to a later sub-slice. Unlike + // the built-in copy function, this byte-by-byte copy always runs + // forwards, even if the slices overlap. Conceptually, this is: + // + // d += forwardCopy(dst[d:d+length], dst[d-offset:]) + for end := d + length; d != end; d++ { + dst[d] = dst[d-offset] + } + } + if d != len(dst) { + return decodeErrCodeCorrupt + } + return 0 +} diff --git a/vendor/github.com/golang/snappy/encode.go b/vendor/github.com/golang/snappy/encode.go new file mode 100644 index 00000000000..8d393e904bb --- /dev/null +++ b/vendor/github.com/golang/snappy/encode.go @@ -0,0 +1,285 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "encoding/binary" + "errors" + "io" +) + +// Encode returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +func Encode(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + for len(src) > 0 { + p := src + src = nil + if len(p) > maxBlockSize { + p, src = p[:maxBlockSize], p[maxBlockSize:] + } + if len(p) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], p) + } else { + d += encodeBlock(dst[d:], p) + } + } + return dst[:d] +} + +// inputMargin is the minimum number of extra input bytes to keep, inside +// encodeBlock's inner loop. On some architectures, this margin lets us +// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) +// literals can be implemented as a single load to and store from a 16-byte +// register. That literal's actual length can be as short as 1 byte, so this +// can copy up to 15 bytes too much, but that's OK as subsequent iterations of +// the encoding loop will fix up the copy overrun, and this inputMargin ensures +// that we don't overrun the dst and src buffers. +const inputMargin = 16 - 1 + +// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that +// could be encoded with a copy tag. This is the minimum with respect to the +// algorithm used by encodeBlock, not a minimum enforced by the file format. +// +// The encoded output must start with at least a 1 byte literal, as there are +// no previous bytes to copy. 
A minimal (1 byte) copy after that, generated +// from an emitCopy call in encodeBlock's main loop, would require at least +// another inputMargin bytes, for the reason above: we want any emitLiteral +// calls inside encodeBlock's main loop to use the fast path if possible, which +// requires being able to overrun by inputMargin bytes. Thus, +// minNonLiteralBlockSize equals 1 + 1 + inputMargin. +// +// The C++ code doesn't use this exact threshold, but it could, as discussed at +// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion +// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an +// optimization. It should not affect the encoded form. This is tested by +// TestSameEncodingAsCppShortCopies. +const minNonLiteralBlockSize = 1 + 1 + inputMargin + +// MaxEncodedLen returns the maximum length of a snappy block, given its +// uncompressed length. +// +// It will return a negative value if srcLen is too large to encode. +func MaxEncodedLen(srcLen int) int { + n := uint64(srcLen) + if n > 0xffffffff { + return -1 + } + // Compressed data can be defined as: + // compressed := item* literal* + // item := literal* copy + // + // The trailing literal sequence has a space blowup of at most 62/60 + // since a literal of length 60 needs one tag byte + one extra byte + // for length information. + // + // Item blowup is trickier to measure. Suppose the "copy" op copies + // 4 bytes of data. Because of a special check in the encoding code, + // we produce a 4-byte copy only if the offset is < 65536. Therefore + // the copy op takes 3 bytes to encode, and this type of item leads + // to at most the 62/60 blowup for representing literals. + // + // Suppose the "copy" op copies 5 bytes of data. If the offset is big + // enough, it will take 5 bytes to encode the copy op. Therefore the + // worst case here is a one-byte literal followed by a five-byte copy. + // That is, 6 bytes of input turn into 7 bytes of "compressed" data. 
+ // + // This last factor dominates the blowup, so the final estimate is: + n = 32 + n + n/6 + if n > 0xffffffff { + return -1 + } + return int(n) +} + +var errClosed = errors.New("snappy: Writer is closed") + +// NewWriter returns a new Writer that compresses to w. +// +// The Writer returned does not buffer writes. There is no need to Flush or +// Close such a Writer. +// +// Deprecated: the Writer returned is not suitable for many small writes, only +// for few large writes. Use NewBufferedWriter instead, which is efficient +// regardless of the frequency and shape of the writes, and remember to Close +// that Writer when done. +func NewWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + obuf: make([]byte, obufLen), + } +} + +// NewBufferedWriter returns a new Writer that compresses to w, using the +// framing format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +// +// The Writer returned buffers writes. Users must call Close to guarantee all +// data has been forwarded to the underlying io.Writer. They may also call +// Flush zero or more times before calling Close. +func NewBufferedWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + ibuf: make([]byte, 0, maxBlockSize), + obuf: make([]byte, obufLen), + } +} + +// Writer is an io.Writer that can write Snappy-compressed bytes. +type Writer struct { + w io.Writer + err error + + // ibuf is a buffer for the incoming (uncompressed) bytes. + // + // Its use is optional. For backwards compatibility, Writers created by the + // NewWriter function have ibuf == nil, do not buffer incoming bytes, and + // therefore do not need to be Flush'ed or Close'd. + ibuf []byte + + // obuf is a buffer for the outgoing (compressed) bytes. + obuf []byte + + // wroteStreamHeader is whether we have written the stream header. + wroteStreamHeader bool +} + +// Reset discards the writer's state and switches the Snappy writer to write to +// w. 
This permits reusing a Writer rather than allocating a new one. +func (w *Writer) Reset(writer io.Writer) { + w.w = writer + w.err = nil + if w.ibuf != nil { + w.ibuf = w.ibuf[:0] + } + w.wroteStreamHeader = false +} + +// Write satisfies the io.Writer interface. +func (w *Writer) Write(p []byte) (nRet int, errRet error) { + if w.ibuf == nil { + // Do not buffer incoming bytes. This does not perform or compress well + // if the caller of Writer.Write writes many small slices. This + // behavior is therefore deprecated, but still supported for backwards + // compatibility with code that doesn't explicitly Flush or Close. + return w.write(p) + } + + // The remainder of this method is based on bufio.Writer.Write from the + // standard library. + + for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { + var n int + if len(w.ibuf) == 0 { + // Large write, empty buffer. + // Write directly from p to avoid copy. + n, _ = w.write(p) + } else { + n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + w.Flush() + } + nRet += n + p = p[n:] + } + if w.err != nil { + return nRet, w.err + } + n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + nRet += n + return nRet, nil +} + +func (w *Writer) write(p []byte) (nRet int, errRet error) { + if w.err != nil { + return 0, w.err + } + for len(p) > 0 { + obufStart := len(magicChunk) + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + copy(w.obuf, magicChunk) + obufStart = 0 + } + + var uncompressed []byte + if len(p) > maxBlockSize { + uncompressed, p = p[:maxBlockSize], p[maxBlockSize:] + } else { + uncompressed, p = p, nil + } + checksum := crc(uncompressed) + + // Compress the buffer, discarding the result if the improvement + // isn't at least 12.5%. 
+ compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) + chunkType := uint8(chunkTypeCompressedData) + chunkLen := 4 + len(compressed) + obufEnd := obufHeaderLen + len(compressed) + if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { + chunkType = chunkTypeUncompressedData + chunkLen = 4 + len(uncompressed) + obufEnd = obufHeaderLen + } + + // Fill in the per-chunk header that comes before the body. + w.obuf[len(magicChunk)+0] = chunkType + w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) + w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) + w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) + w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) + w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) + w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) + w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) + + if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { + w.err = err + return nRet, err + } + if chunkType == chunkTypeUncompressedData { + if _, err := w.w.Write(uncompressed); err != nil { + w.err = err + return nRet, err + } + } + nRet += len(uncompressed) + } + return nRet, nil +} + +// Flush flushes the Writer to its underlying io.Writer. +func (w *Writer) Flush() error { + if w.err != nil { + return w.err + } + if len(w.ibuf) == 0 { + return nil + } + w.write(w.ibuf) + w.ibuf = w.ibuf[:0] + return w.err +} + +// Close calls Flush and then closes the Writer. +func (w *Writer) Close() error { + w.Flush() + ret := w.err + if w.err == nil { + w.err = errClosed + } + return ret +} diff --git a/vendor/github.com/golang/snappy/encode_amd64.go b/vendor/github.com/golang/snappy/encode_amd64.go new file mode 100644 index 00000000000..150d91bc8be --- /dev/null +++ b/vendor/github.com/golang/snappy/encode_amd64.go @@ -0,0 +1,29 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !appengine +// +build gc +// +build !noasm + +package snappy + +// emitLiteral has the same semantics as in encode_other.go. +// +//go:noescape +func emitLiteral(dst, lit []byte) int + +// emitCopy has the same semantics as in encode_other.go. +// +//go:noescape +func emitCopy(dst []byte, offset, length int) int + +// extendMatch has the same semantics as in encode_other.go. +// +//go:noescape +func extendMatch(src []byte, i, j int) int + +// encodeBlock has the same semantics as in encode_other.go. +// +//go:noescape +func encodeBlock(dst, src []byte) (d int) diff --git a/vendor/github.com/golang/snappy/encode_amd64.s b/vendor/github.com/golang/snappy/encode_amd64.s new file mode 100644 index 00000000000..adfd979fe27 --- /dev/null +++ b/vendor/github.com/golang/snappy/encode_amd64.s @@ -0,0 +1,730 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a +// Go toolchain regression. See https://github.com/golang/go/issues/15426 and +// https://github.com/golang/snappy/issues/29 +// +// As a workaround, the package was built with a known good assembler, and +// those instructions were disassembled by "objdump -d" to yield the +// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 +// style comments, in AT&T asm syntax. Note that rsp here is a physical +// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm). +// The instructions were then encoded as "BYTE $0x.." sequences, which assemble +// fine on Go 1.6. + +// The asm code generally follows the pure Go code in encode_other.go, except +// where marked with a "!!!". 
+ +// ---------------------------------------------------------------------------- + +// func emitLiteral(dst, lit []byte) int +// +// All local variables fit into registers. The register allocation: +// - AX len(lit) +// - BX n +// - DX return value +// - DI &dst[i] +// - R10 &lit[0] +// +// The 24 bytes of stack space is to call runtime·memmove. +// +// The unusual register allocation of local variables, such as R10 for the +// source pointer, matches the allocation used at the call site in encodeBlock, +// which makes it easier to manually inline this function. +TEXT ·emitLiteral(SB), NOSPLIT, $24-56 + MOVQ dst_base+0(FP), DI + MOVQ lit_base+24(FP), R10 + MOVQ lit_len+32(FP), AX + MOVQ AX, DX + MOVL AX, BX + SUBL $1, BX + + CMPL BX, $60 + JLT oneByte + CMPL BX, $256 + JLT twoBytes + +threeBytes: + MOVB $0xf4, 0(DI) + MOVW BX, 1(DI) + ADDQ $3, DI + ADDQ $3, DX + JMP memmove + +twoBytes: + MOVB $0xf0, 0(DI) + MOVB BX, 1(DI) + ADDQ $2, DI + ADDQ $2, DX + JMP memmove + +oneByte: + SHLB $2, BX + MOVB BX, 0(DI) + ADDQ $1, DI + ADDQ $1, DX + +memmove: + MOVQ DX, ret+48(FP) + + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // DI, R10 and AX as arguments. + MOVQ DI, 0(SP) + MOVQ R10, 8(SP) + MOVQ AX, 16(SP) + CALL runtime·memmove(SB) + RET + +// ---------------------------------------------------------------------------- + +// func emitCopy(dst []byte, offset, length int) int +// +// All local variables fit into registers. The register allocation: +// - AX length +// - SI &dst[0] +// - DI &dst[i] +// - R11 offset +// +// The unusual register allocation of local variables, such as R11 for the +// offset, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. 
+TEXT ·emitCopy(SB), NOSPLIT, $0-48 + MOVQ dst_base+0(FP), DI + MOVQ DI, SI + MOVQ offset+24(FP), R11 + MOVQ length+32(FP), AX + +loop0: + // for length >= 68 { etc } + CMPL AX, $68 + JLT step1 + + // Emit a length 64 copy, encoded as 3 bytes. + MOVB $0xfe, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $64, AX + JMP loop0 + +step1: + // if length > 64 { etc } + CMPL AX, $64 + JLE step2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVB $0xee, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $60, AX + +step2: + // if length >= 12 || offset >= 2048 { goto step3 } + CMPL AX, $12 + JGE step3 + CMPL R11, $2048 + JGE step3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(DI) + SHRL $8, R11 + SHLB $5, R11 + SUBB $4, AX + SHLB $2, AX + ORB AX, R11 + ORB $1, R11 + MOVB R11, 0(DI) + ADDQ $2, DI + + // Return the number of bytes written. + SUBQ SI, DI + MOVQ DI, ret+40(FP) + RET + +step3: + // Emit the remaining copy, encoded as 3 bytes. + SUBL $1, AX + SHLB $2, AX + ORB $2, AX + MOVB AX, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + + // Return the number of bytes written. + SUBQ SI, DI + MOVQ DI, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func extendMatch(src []byte, i, j int) int +// +// All local variables fit into registers. The register allocation: +// - DX &src[0] +// - SI &src[j] +// - R13 &src[len(src) - 8] +// - R14 &src[len(src)] +// - R15 &src[i] +// +// The unusual register allocation of local variables, such as R15 for a source +// pointer, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. +TEXT ·extendMatch(SB), NOSPLIT, $0-48 + MOVQ src_base+0(FP), DX + MOVQ src_len+8(FP), R14 + MOVQ i+24(FP), R15 + MOVQ j+32(FP), SI + ADDQ DX, R14 + ADDQ DX, R15 + ADDQ DX, SI + MOVQ R14, R13 + SUBQ $8, R13 + +cmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. 
If those 8 bytes are equal, repeat. + CMPQ SI, R13 + JA cmp1 + MOVQ (R15), AX + MOVQ (SI), BX + CMPQ AX, BX + JNE bsf + ADDQ $8, R15 + ADDQ $8, SI + JMP cmp8 + +bsf: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. The BSF instruction finds the + // least significant 1 bit, the amd64 architecture is little-endian, and + // the shift by 3 converts a bit index to a byte index. + XORQ AX, BX + BSFQ BX, BX + SHRQ $3, BX + ADDQ BX, SI + + // Convert from &src[ret] to ret. + SUBQ DX, SI + MOVQ SI, ret+40(FP) + RET + +cmp1: + // In src's tail, compare 1 byte at a time. + CMPQ SI, R14 + JAE extendMatchEnd + MOVB (R15), AX + MOVB (SI), BX + CMPB AX, BX + JNE extendMatchEnd + ADDQ $1, R15 + ADDQ $1, SI + JMP cmp1 + +extendMatchEnd: + // Convert from &src[ret] to ret. + SUBQ DX, SI + MOVQ SI, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func encodeBlock(dst, src []byte) (d int) +// +// All local variables fit into registers, other than "var table". The register +// allocation: +// - AX . . +// - BX . . +// - CX 56 shift (note that amd64 shifts by non-immediates must use CX). +// - DX 64 &src[0], tableSize +// - SI 72 &src[s] +// - DI 80 &dst[d] +// - R9 88 sLimit +// - R10 . &src[nextEmit] +// - R11 96 prevHash, currHash, nextHash, offset +// - R12 104 &src[base], skip +// - R13 . &src[nextS], &src[len(src) - 8] +// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x +// - R15 112 candidate +// +// The second column (56, 64, etc) is the stack offset to spill the registers +// when calling other functions. We could pack this slightly tighter, but it's +// simpler to have a dedicated spill map independent of the function called. +// +// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. 
An +// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill +// local variables (registers) during calls gives 32768 + 56 + 64 = 32888. +TEXT ·encodeBlock(SB), 0, $32888-56 + MOVQ dst_base+0(FP), DI + MOVQ src_base+24(FP), SI + MOVQ src_len+32(FP), R14 + + // shift, tableSize := uint32(32-8), 1<<8 + MOVQ $24, CX + MOVQ $256, DX + +calcShift: + // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + // shift-- + // } + CMPQ DX, $16384 + JGE varTable + CMPQ DX, R14 + JGE varTable + SUBQ $1, CX + SHLQ $1, DX + JMP calcShift + +varTable: + // var table [maxTableSize]uint16 + // + // In the asm code, unlike the Go code, we can zero-initialize only the + // first tableSize elements. Each uint16 element is 2 bytes and each MOVOU + // writes 16 bytes, so we can do only tableSize/8 writes instead of the + // 2048 writes that would zero-initialize all of table's 32768 bytes. + SHRQ $3, DX + LEAQ table-32768(SP), BX + PXOR X0, X0 + +memclr: + MOVOU X0, 0(BX) + ADDQ $16, BX + SUBQ $1, DX + JNZ memclr + + // !!! DX = &src[0] + MOVQ SI, DX + + // sLimit := len(src) - inputMargin + MOVQ R14, R9 + SUBQ $15, R9 + + // !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't + // change for the rest of the function. 
+ MOVQ CX, 56(SP) + MOVQ DX, 64(SP) + MOVQ R9, 88(SP) + + // nextEmit := 0 + MOVQ DX, R10 + + // s := 1 + ADDQ $1, SI + + // nextHash := hash(load32(src, s), shift) + MOVL 0(SI), R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + +outer: + // for { etc } + + // skip := 32 + MOVQ $32, R12 + + // nextS := s + MOVQ SI, R13 + + // candidate := 0 + MOVQ $0, R15 + +inner0: + // for { etc } + + // s := nextS + MOVQ R13, SI + + // bytesBetweenHashLookups := skip >> 5 + MOVQ R12, R14 + SHRQ $5, R14 + + // nextS = s + bytesBetweenHashLookups + ADDQ R14, R13 + + // skip += bytesBetweenHashLookups + ADDQ R14, R12 + + // if nextS > sLimit { goto emitRemainder } + MOVQ R13, AX + SUBQ DX, AX + CMPQ AX, R9 + JA emitRemainder + + // candidate = int(table[nextHash]) + // XXX: MOVWQZX table-32768(SP)(R11*2), R15 + // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 + BYTE $0x4e + BYTE $0x0f + BYTE $0xb7 + BYTE $0x7c + BYTE $0x5c + BYTE $0x78 + + // table[nextHash] = uint16(s) + MOVQ SI, AX + SUBQ DX, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // nextHash = hash(load32(src, nextS), shift) + MOVL 0(R13), R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // if load32(src, s) != load32(src, candidate) { continue } break + MOVL 0(SI), AX + MOVL (DX)(R15*1), BX + CMPL AX, BX + JNE inner0 + +fourByteMatch: + // As per the encode_other.go code: + // + // A 4-byte match has been found. We'll later see etc. + + // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment + // on inputMargin in encode.go. + MOVQ SI, AX + SUBQ R10, AX + CMPQ AX, $16 + JLE emitLiteralFastPath + + // ---------------------------------------- + // Begin inline of the emitLiteral call. 
+ // + // d += emitLiteral(dst[d:], src[nextEmit:s]) + + MOVL AX, BX + SUBL $1, BX + + CMPL BX, $60 + JLT inlineEmitLiteralOneByte + CMPL BX, $256 + JLT inlineEmitLiteralTwoBytes + +inlineEmitLiteralThreeBytes: + MOVB $0xf4, 0(DI) + MOVW BX, 1(DI) + ADDQ $3, DI + JMP inlineEmitLiteralMemmove + +inlineEmitLiteralTwoBytes: + MOVB $0xf0, 0(DI) + MOVB BX, 1(DI) + ADDQ $2, DI + JMP inlineEmitLiteralMemmove + +inlineEmitLiteralOneByte: + SHLB $2, BX + MOVB BX, 0(DI) + ADDQ $1, DI + +inlineEmitLiteralMemmove: + // Spill local variables (registers) onto the stack; call; unspill. + // + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // DI, R10 and AX as arguments. + MOVQ DI, 0(SP) + MOVQ R10, 8(SP) + MOVQ AX, 16(SP) + ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)". + MOVQ SI, 72(SP) + MOVQ DI, 80(SP) + MOVQ R15, 112(SP) + CALL runtime·memmove(SB) + MOVQ 56(SP), CX + MOVQ 64(SP), DX + MOVQ 72(SP), SI + MOVQ 80(SP), DI + MOVQ 88(SP), R9 + MOVQ 112(SP), R15 + JMP inner1 + +inlineEmitLiteralEnd: + // End inline of the emitLiteral call. + // ---------------------------------------- + +emitLiteralFastPath: + // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2". + MOVB AX, BX + SUBB $1, BX + SHLB $2, BX + MOVB BX, (DI) + ADDQ $1, DI + + // !!! Implement the copy from lit to dst as a 16-byte load and store. + // (Encode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only len(lit) bytes, but that's + // OK. Subsequent iterations will fix up the overrun. + // + // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. + MOVOU 0(R10), X0 + MOVOU X0, 0(DI) + ADDQ AX, DI + +inner1: + // for { etc } + + // base := s + MOVQ SI, R12 + + // !!! 
offset := base - candidate + MOVQ R12, R11 + SUBQ R15, R11 + SUBQ DX, R11 + + // ---------------------------------------- + // Begin inline of the extendMatch call. + // + // s = extendMatch(src, candidate+4, s+4) + + // !!! R14 = &src[len(src)] + MOVQ src_len+32(FP), R14 + ADDQ DX, R14 + + // !!! R13 = &src[len(src) - 8] + MOVQ R14, R13 + SUBQ $8, R13 + + // !!! R15 = &src[candidate + 4] + ADDQ $4, R15 + ADDQ DX, R15 + + // !!! s += 4 + ADDQ $4, SI + +inlineExtendMatchCmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMPQ SI, R13 + JA inlineExtendMatchCmp1 + MOVQ (R15), AX + MOVQ (SI), BX + CMPQ AX, BX + JNE inlineExtendMatchBSF + ADDQ $8, R15 + ADDQ $8, SI + JMP inlineExtendMatchCmp8 + +inlineExtendMatchBSF: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. The BSF instruction finds the + // least significant 1 bit, the amd64 architecture is little-endian, and + // the shift by 3 converts a bit index to a byte index. + XORQ AX, BX + BSFQ BX, BX + SHRQ $3, BX + ADDQ BX, SI + JMP inlineExtendMatchEnd + +inlineExtendMatchCmp1: + // In src's tail, compare 1 byte at a time. + CMPQ SI, R14 + JAE inlineExtendMatchEnd + MOVB (R15), AX + MOVB (SI), BX + CMPB AX, BX + JNE inlineExtendMatchEnd + ADDQ $1, R15 + ADDQ $1, SI + JMP inlineExtendMatchCmp1 + +inlineExtendMatchEnd: + // End inline of the extendMatch call. + // ---------------------------------------- + + // ---------------------------------------- + // Begin inline of the emitCopy call. + // + // d += emitCopy(dst[d:], base-candidate, s-base) + + // !!! length := s - base + MOVQ SI, AX + SUBQ R12, AX + +inlineEmitCopyLoop0: + // for length >= 68 { etc } + CMPL AX, $68 + JLT inlineEmitCopyStep1 + + // Emit a length 64 copy, encoded as 3 bytes. 
+ MOVB $0xfe, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $64, AX + JMP inlineEmitCopyLoop0 + +inlineEmitCopyStep1: + // if length > 64 { etc } + CMPL AX, $64 + JLE inlineEmitCopyStep2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVB $0xee, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $60, AX + +inlineEmitCopyStep2: + // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 } + CMPL AX, $12 + JGE inlineEmitCopyStep3 + CMPL R11, $2048 + JGE inlineEmitCopyStep3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(DI) + SHRL $8, R11 + SHLB $5, R11 + SUBB $4, AX + SHLB $2, AX + ORB AX, R11 + ORB $1, R11 + MOVB R11, 0(DI) + ADDQ $2, DI + JMP inlineEmitCopyEnd + +inlineEmitCopyStep3: + // Emit the remaining copy, encoded as 3 bytes. + SUBL $1, AX + SHLB $2, AX + ORB $2, AX + MOVB AX, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + +inlineEmitCopyEnd: + // End inline of the emitCopy call. + // ---------------------------------------- + + // nextEmit = s + MOVQ SI, R10 + + // if s >= sLimit { goto emitRemainder } + MOVQ SI, AX + SUBQ DX, AX + CMPQ AX, R9 + JAE emitRemainder + + // As per the encode_other.go code: + // + // We could immediately etc. 
+ + // x := load64(src, s-1) + MOVQ -1(SI), R14 + + // prevHash := hash(uint32(x>>0), shift) + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // table[prevHash] = uint16(s-1) + MOVQ SI, AX + SUBQ DX, AX + SUBQ $1, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // currHash := hash(uint32(x>>8), shift) + SHRQ $8, R14 + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // candidate = int(table[currHash]) + // XXX: MOVWQZX table-32768(SP)(R11*2), R15 + // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 + BYTE $0x4e + BYTE $0x0f + BYTE $0xb7 + BYTE $0x7c + BYTE $0x5c + BYTE $0x78 + + // table[currHash] = uint16(s) + ADDQ $1, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // if uint32(x>>8) == load32(src, candidate) { continue } + MOVL (DX)(R15*1), BX + CMPL R14, BX + JEQ inner1 + + // nextHash = hash(uint32(x>>16), shift) + SHRQ $8, R14 + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // s++ + ADDQ $1, SI + + // break out of the inner1 for loop, i.e. continue the outer loop. + JMP outer + +emitRemainder: + // if nextEmit < len(src) { etc } + MOVQ src_len+32(FP), AX + ADDQ DX, AX + CMPQ R10, AX + JEQ encodeBlockEnd + + // d += emitLiteral(dst[d:], src[nextEmit:]) + // + // Push args. + MOVQ DI, 0(SP) + MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative. + MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative. + MOVQ R10, 24(SP) + SUBQ R10, AX + MOVQ AX, 32(SP) + MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative. + + // Spill local variables (registers) onto the stack; call; unspill. + MOVQ DI, 80(SP) + CALL ·emitLiteral(SB) + MOVQ 80(SP), DI + + // Finish the "d +=" part of "d += emitLiteral(etc)". 
+ ADDQ 48(SP), DI + +encodeBlockEnd: + MOVQ dst_base+0(FP), AX + SUBQ AX, DI + MOVQ DI, d+48(FP) + RET diff --git a/vendor/github.com/golang/snappy/encode_other.go b/vendor/github.com/golang/snappy/encode_other.go new file mode 100644 index 00000000000..dbcae905e6e --- /dev/null +++ b/vendor/github.com/golang/snappy/encode_other.go @@ -0,0 +1,238 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 appengine !gc noasm + +package snappy + +func load32(b []byte, i int) uint32 { + b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func load64(b []byte, i int) uint64 { + b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +// emitLiteral writes a literal chunk and returns the number of bytes written. +// +// It assumes that: +// dst is long enough to hold the encoded bytes +// 1 <= len(lit) && len(lit) <= 65536 +func emitLiteral(dst, lit []byte) int { + i, n := 0, uint(len(lit)-1) + switch { + case n < 60: + dst[0] = uint8(n)<<2 | tagLiteral + i = 1 + case n < 1<<8: + dst[0] = 60<<2 | tagLiteral + dst[1] = uint8(n) + i = 2 + default: + dst[0] = 61<<2 | tagLiteral + dst[1] = uint8(n) + dst[2] = uint8(n >> 8) + i = 3 + } + return i + copy(dst[i:], lit) +} + +// emitCopy writes a copy chunk and returns the number of bytes written. +// +// It assumes that: +// dst is long enough to hold the encoded bytes +// 1 <= offset && offset <= 65535 +// 4 <= length && length <= 65535 +func emitCopy(dst []byte, offset, length int) int { + i := 0 + // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. 
The + // threshold for this loop is a little higher (at 68 = 64 + 4), and the + // length emitted down below is is a little lower (at 60 = 64 - 4), because + // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed + // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as + // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as + // 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a + // tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an + // encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1. + for length >= 68 { + // Emit a length 64 copy, encoded as 3 bytes. + dst[i+0] = 63<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + i += 3 + length -= 64 + } + if length > 64 { + // Emit a length 60 copy, encoded as 3 bytes. + dst[i+0] = 59<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + i += 3 + length -= 60 + } + if length >= 12 || offset >= 2048 { + // Emit the remaining copy, encoded as 3 bytes. + dst[i+0] = uint8(length-1)<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + return i + 3 + } + // Emit the remaining copy, encoded as 2 bytes. + dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 + dst[i+1] = uint8(offset) + return i + 2 +} + +// extendMatch returns the largest k such that k <= len(src) and that +// src[i:i+k-j] and src[j:k] have the same contents. +// +// It assumes that: +// 0 <= i && i < j && j <= len(src) +func extendMatch(src []byte, i, j int) int { + for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { + } + return j +} + +func hash(u, shift uint32) uint32 { + return (u * 0x1e35a7bd) >> shift +} + +// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. 
+// +// It also assumes that: +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlock(dst, src []byte) (d int) { + // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. + // The table element type is uint16, as s < sLimit and sLimit < len(src) + // and len(src) <= maxBlockSize and maxBlockSize == 65536. + const ( + maxTableSize = 1 << 14 + // tableMask is redundant, but helps the compiler eliminate bounds + // checks. + tableMask = maxTableSize - 1 + ) + shift := uint32(32 - 8) + for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + shift-- + } + // In Go, all array elements are zero-initialized, so there is no advantage + // to a smaller tableSize per se. However, it matches the C++ algorithm, + // and in the asm versions of this code, we can get away with zeroing only + // the first tableSize elements. + var table [maxTableSize]uint16 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + nextHash := hash(load32(src, s), shift) + + for { + // Copied from the C++ snappy implementation: + // + // Heuristic match skipping: If 32 bytes are scanned with no matches + // found, start looking only at every other byte. If 32 more bytes are + // scanned (or skipped), look at every third byte, etc.. When a match + // is found, immediately go back to looking at every byte. 
This is a + // small loss (~5% performance, ~0.1% density) for compressible data + // due to more bookkeeping, but for non-compressible data (such as + // JPEG) it's a huge win since the compressor quickly "realizes" the + // data is incompressible and doesn't bother looking for matches + // everywhere. + // + // The "skip" variable keeps track of how many bytes there are since + // the last match; dividing it by 32 (ie. right-shifting by five) gives + // the number of bytes to move ahead for each iteration. + skip := 32 + + nextS := s + candidate := 0 + for { + s = nextS + bytesBetweenHashLookups := skip >> 5 + nextS = s + bytesBetweenHashLookups + skip += bytesBetweenHashLookups + if nextS > sLimit { + goto emitRemainder + } + candidate = int(table[nextHash&tableMask]) + table[nextHash&tableMask] = uint16(s) + nextHash = hash(load32(src, nextS), shift) + if load32(src, s) == load32(src, candidate) { + break + } + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + d += emitLiteral(dst[d:], src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + + // Extend the 4-byte match as long as possible. 
+ // + // This is an inlined version of: + // s = extendMatch(src, candidate+4, s+4) + s += 4 + for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 { + } + + d += emitCopy(dst[d:], base-candidate, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. + x := load64(src, s-1) + prevHash := hash(uint32(x>>0), shift) + table[prevHash&tableMask] = uint16(s - 1) + currHash := hash(uint32(x>>8), shift) + candidate = int(table[currHash&tableMask]) + table[currHash&tableMask] = uint16(s) + if uint32(x>>8) != load32(src, candidate) { + nextHash = hash(uint32(x>>16), shift) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} diff --git a/vendor/github.com/golang/snappy/snappy.go b/vendor/github.com/golang/snappy/snappy.go new file mode 100644 index 00000000000..ece692ea461 --- /dev/null +++ b/vendor/github.com/golang/snappy/snappy.go @@ -0,0 +1,98 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package snappy implements the Snappy compression format. It aims for very +// high speeds and reasonable compression. +// +// There are actually two Snappy formats: block and stream. They are related, +// but different: trying to decompress block-compressed data as a Snappy stream +// will fail, and vice versa. The block format is the Decode and Encode +// functions and the stream format is the Reader and Writer types. 
+// +// The block format, the more common case, is used when the complete size (the +// number of bytes) of the original data is known upfront, at the time +// compression starts. The stream format, also known as the framing format, is +// for when that isn't always true. +// +// The canonical, C++ implementation is at https://github.com/google/snappy and +// it only implements the block format. +package snappy // import "github.com/golang/snappy" + +import ( + "hash/crc32" +) + +/* +Each encoded block begins with the varint-encoded length of the decoded data, +followed by a sequence of chunks. Chunks begin and end on byte boundaries. The +first byte of each chunk is broken into its 2 least and 6 most significant bits +called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. +Zero means a literal tag. All other values mean a copy tag. + +For literal tags: + - If m < 60, the next 1 + m bytes are literal bytes. + - Otherwise, let n be the little-endian unsigned integer denoted by the next + m - 59 bytes. The next 1 + n bytes after that are literal bytes. + +For copy tags, length bytes are copied from offset bytes ago, in the style of +Lempel-Ziv compression algorithms. In particular: + - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). + The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 + of the offset. The next byte is bits 0-7 of the offset. + - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). + The length is 1 + m. The offset is the little-endian unsigned integer + denoted by the next 2 bytes. + - For l == 3, this tag is a legacy format that is no longer issued by most + encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in + [1, 65). The length is 1 + m. The offset is the little-endian unsigned + integer denoted by the next 4 bytes. 
+*/ +const ( + tagLiteral = 0x00 + tagCopy1 = 0x01 + tagCopy2 = 0x02 + tagCopy4 = 0x03 +) + +const ( + checksumSize = 4 + chunkHeaderSize = 4 + magicChunk = "\xff\x06\x00\x00" + magicBody + magicBody = "sNaPpY" + + // maxBlockSize is the maximum size of the input to encodeBlock. It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. + // + // Also, for the framing format (Writer type instead of Encode function), + // https://github.com/google/snappy/blob/master/framing_format.txt says + // that "the uncompressed data in a chunk must be no longer than 65536 + // bytes". + maxBlockSize = 65536 + + // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is + // hard coded to be a const instead of a variable, so that obufLen can also + // be a const. Their equivalence is confirmed by + // TestMaxEncodedLenOfMaxBlockSize. + maxEncodedLenOfMaxBlockSize = 76490 + + obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize + obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize +) + +const ( + chunkTypeCompressedData = 0x00 + chunkTypeUncompressedData = 0x01 + chunkTypePadding = 0xfe + chunkTypeStreamIdentifier = 0xff +) + +var crcTable = crc32.MakeTable(crc32.Castagnoli) + +// crc implements the checksum specified in section 3 of +// https://github.com/google/snappy/blob/master/framing_format.txt +func crc(b []byte) uint32 { + c := crc32.Update(0, crcTable, b) + return uint32(c>>15|c<<17) + 0xa282ead8 +} diff --git a/vendor/github.com/knative/test-infra/.github/pull-request-template.md b/vendor/github.com/knative/test-infra/.github/pull-request-template.md deleted file mode 100644 index 9b2b7820f61..00000000000 --- a/vendor/github.com/knative/test-infra/.github/pull-request-template.md +++ /dev/null @@ -1,7 +0,0 @@ - - -Fixes # diff --git a/vendor/github.com/knative/test-infra/CONTRIBUTING.md b/vendor/github.com/knative/test-infra/CONTRIBUTING.md deleted file mode 100644 index 
bcfe857fda4..00000000000 --- a/vendor/github.com/knative/test-infra/CONTRIBUTING.md +++ /dev/null @@ -1,3 +0,0 @@ -# Contribution guidelines - -So you want to hack on Knative Test Infrastructure? Yay! Please refer to Knative's overall [contribution guidelines](https://github.com/knative/docs/blob/master/community/CONTRIBUTING.md) to find out how you can help. diff --git a/vendor/github.com/knative/test-infra/Gopkg.lock b/vendor/github.com/knative/test-infra/Gopkg.lock deleted file mode 100644 index e0347ada5a9..00000000000 --- a/vendor/github.com/knative/test-infra/Gopkg.lock +++ /dev/null @@ -1,28 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. - - -[[projects]] - branch = "master" - name = "github.com/google/licenseclassifier" - packages = [ - ".", - "internal/sets", - "stringclassifier", - "stringclassifier/internal/pq", - "stringclassifier/searchset", - "stringclassifier/searchset/tokenizer" - ] - revision = "3c8ad1f0b0644b6646210ee9cf2f34ff907e2e18" - -[[projects]] - name = "github.com/sergi/go-diff" - packages = ["diffmatchpatch"] - revision = "1744e2970ca51c86172c8190fadad617561ed6e7" - version = "v1.0.0" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - inputs-digest = "aea50f9014005bedc3dc202c5fbf9d2d8c7a6f7beac2337fd863b23f411c4125" - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/vendor/github.com/knative/test-infra/Gopkg.toml b/vendor/github.com/knative/test-infra/Gopkg.toml deleted file mode 100644 index 1a03ba55a89..00000000000 --- a/vendor/github.com/knative/test-infra/Gopkg.toml +++ /dev/null @@ -1,14 +0,0 @@ -# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html -# for detailed Gopkg.toml documentation. - -required = [ - "github.com/google/licenseclassifier/licenses", -] - -# TODO(mattmoor): Find a way to bundle the licenseclassifier's -# license database, so folks don't have to go get it. 
- -[prune] - go-tests = true - unused-packages = true - non-go = true diff --git a/vendor/github.com/knative/test-infra/LICENSE b/vendor/github.com/knative/test-infra/LICENSE deleted file mode 100644 index d6456956733..00000000000 --- a/vendor/github.com/knative/test-infra/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. 
Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative 
Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/knative/test-infra/OWNERS b/vendor/github.com/knative/test-infra/OWNERS deleted file mode 100644 index ed29d40151f..00000000000 --- a/vendor/github.com/knative/test-infra/OWNERS +++ /dev/null @@ -1,7 +0,0 @@ -# The OWNERS file is used by prow to automatically merge approved PRs. - -approvers: -- adrcunha -- jessiezcc -- srinivashegde86 -- steuhs diff --git a/vendor/github.com/knative/test-infra/README.md b/vendor/github.com/knative/test-infra/README.md deleted file mode 100644 index 88f40521e11..00000000000 --- a/vendor/github.com/knative/test-infra/README.md +++ /dev/null @@ -1,17 +0,0 @@ -# Knative Test Infrastructure - -The `test-infra` repository contains a collection of tools for testing Knative, collecting metrics -and displaying test results. 
- -## High level architecture - -Knative uses [Prow](https://github.com/kubernetes/test-infra/tree/master/prow) to schedule testing and update issues. - -### Gubernator - -Knative uses [gubernator](https://github.com/kubernetes/test-infra) to provide -a [PR dashboard](https://gubernator.knative.dev/pr) for contributions in the Knative github organization. - -### E2E Testing - -Our E2E testing uses [kubetest](https://github.com/kubernetes/test-infra/blob/master/kubetest) to build/deploy/test Knative clusters. diff --git a/vendor/github.com/knative/test-infra/WORKSPACE b/vendor/github.com/knative/test-infra/WORKSPACE deleted file mode 100644 index 91db673d566..00000000000 --- a/vendor/github.com/knative/test-infra/WORKSPACE +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright 2018 The Knative Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -# Required rules for building kubernetes/test-infra -# These all come from http://github.com/kubernetes/test-infra/blob/master/WORKSPACE - -http_archive( - name = "io_bazel_rules_go", - sha256 = "1868ff68d6079e31b2f09b828b58d62e57ca8e9636edff699247c9108518570b", - url = "https://github.com/bazelbuild/rules_go/releases/download/0.11.1/rules_go-0.11.1.tar.gz", -) - -load("@io_bazel_rules_go//go:def.bzl", "go_rules_dependencies", "go_register_toolchains") - -go_rules_dependencies() - -go_register_toolchains( - go_version = "1.10.2", -) - -git_repository( - name = "io_bazel_rules_k8s", - commit = "3756369d4920033c32c12d16207e8ee14fee1b18", - remote = "https://github.com/bazelbuild/rules_k8s.git", -) - -http_archive( - name = "io_bazel_rules_docker", - sha256 = "cef4e7adfc1df999891e086bf42bed9092cfdf374adb902f18de2c1d6e1e0197", - strip_prefix = "rules_docker-198367210c55fba5dded22274adde1a289801dc4", - urls = ["https://github.com/bazelbuild/rules_docker/archive/198367210c55fba5dded22274adde1a289801dc4.tar.gz"], -) - -# External repositories - -git_repository( - name = "k8s", - remote = "http://github.com/kubernetes/test-infra.git", - commit = "dd12621d6178838097847abf5842ad8d08fc9308", # HEAD as of 8/1/2018 -) - diff --git a/vendor/github.com/knative/test-infra/ci/README.md b/vendor/github.com/knative/test-infra/ci/README.md deleted file mode 100644 index 51b28eddac4..00000000000 --- a/vendor/github.com/knative/test-infra/ci/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Continuous Integration / Continuous Deployment system - -This directory contains the configs for all systems related to Knative's CI/CD system. 
diff --git a/vendor/github.com/knative/test-infra/ci/gubernator/Makefile b/vendor/github.com/knative/test-infra/ci/gubernator/Makefile deleted file mode 100644 index 5307d95d94f..00000000000 --- a/vendor/github.com/knative/test-infra/ci/gubernator/Makefile +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright 2018 The Knative Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -SRC := test-infra/gubernator - -deploy: - # Fetch latest source, patch for our instance - rm -fr test-infra - git clone http://github.com/kubernetes/test-infra.git - cp config.yaml $(SRC) - cp redir_github.py $(SRC) - sed -i -e '/^runtime: .*/a service: gubernator' $(SRC)/app.yaml - sed -i -e "/^handlers:/a\- url: /timeline\n script: redir_github.app\n" $(SRC)/app.yaml - sed -i -e 's/user:kubernetes/user:knative/' $(SRC)/view_pr.py - sed -i -e 's/Kubernetes/Knative/' $(SRC)/templates/index.html - sed -i -e 's/k8s-testgrid.appspot.com/testgrid.knative.dev/' $(SRC)/filters.py - sed -i -e 's/k8s-testgrid/knative-testgrid/' $(SRC)/testgrid.py - # Deploy - make -C ../prow get-cluster-credentials - PROJECT=knative-tests make -C $(SRC) deploy - # Cleanup - rm -fr test-infra diff --git a/vendor/github.com/knative/test-infra/ci/gubernator/README.md b/vendor/github.com/knative/test-infra/ci/gubernator/README.md deleted file mode 100644 index 14508c3dfda..00000000000 --- a/vendor/github.com/knative/test-infra/ci/gubernator/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# Gubernator config - -This directory contains 
the config for our [Gubernator](https://github.com/kubernetes/test-infra/tree/master/gubernator) instance, plus a makefile for deploying it. - -* `config.yaml` Gubernator configuration. -* `Makefile` Recipe for deploying a Gubernator instance. -* `redir_github.py` Simple redirection handler to Gubernator's GitHub service. diff --git a/vendor/github.com/knative/test-infra/ci/gubernator/config.yaml b/vendor/github.com/knative/test-infra/ci/gubernator/config.yaml deleted file mode 100644 index 794b4ce2bdd..00000000000 --- a/vendor/github.com/knative/test-infra/ci/gubernator/config.yaml +++ /dev/null @@ -1,71 +0,0 @@ - -# Copyright 2018 The Knative Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -default_external_services: - gcs_pull_prefix: knative-prow/pr-logs/pull - prow_url: prow.knative.dev -default_org: knative -default_repo: serving -external_services: - knative: - gcs_bucket: knative-prow/ - gcs_pull_prefix: knative-prow/pr-logs/pull - prow_url: prow.knative.dev -jobs: - knative-prow/pr-logs/directory/: - - pull-knative-serving-build-tests - - pull-knative-serving-integration-tests - - pull-knative-serving-unit-tests - - pull-knative-eventing-build-tests - - pull-knative-eventing-integration-tests - - pull-knative-eventing-unit-tests - - pull-knative-eventing-sources-build-tests - - pull-knative-eventing-sources-integration-tests - - pull-knative-eventing-sources-unit-tests - - pull-knative-docs-build-tests - - pull-knative-docs-unit-tests - - pull-knative-docs-integration-tests - - pull-knative-build-templates-unit-tests - - pull-knative-build-templates-build-tests - - pull-knative-build-templates-integration-tests - - pull-knative-build-pipeline-build-tests - - pull-knative-build-pipeline-unit-tests - - pull-knative-build-build-tests - - pull-knative-build-unit-tests - - pull-knative-build-integration-tests - - pull-knative-pkg-build-tests - - pull-knative-pkg-unit-tests - - pull-knative-pkg-integration-tests - - pull-knative-test-infra-build-tests - - pull-knative-test-infra-unit-tests - - pull-knative-test-infra-integration-tests - - pull-knative-caching-build-tests - - pull-knative-caching-unit-tests - - pull-knative-caching-integration-tests - knative-prow/logs/: - - ci-knative-serving-continuous - - ci-knative-serving-release - - ci-knative-serving-playground - - ci-knative-build-continuous - - ci-knative-build-release - - ci-knative-eventing-continuous - - ci-knative-eventing-release - - ci-knative-eventing-sources-continuous - - ci-knative-eventing-sources-release - - ci-knative-build-templates-continuous - - ci-knative-docs-continuous - - ci-knative-pkg-continuous - - ci-knative-caching-continuous -recursive_artifacts: false diff --git 
a/vendor/github.com/knative/test-infra/ci/gubernator/redir_github.py b/vendor/github.com/knative/test-infra/ci/gubernator/redir_github.py deleted file mode 100644 index e168d6adbc5..00000000000 --- a/vendor/github.com/knative/test-infra/ci/gubernator/redir_github.py +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2018 The Knative Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Simple redirection handler to Gubernator's GitHub service.""" - -import webapp2 - -class GitHubRedirect(webapp2.RequestHandler): - def get(self): - self.redirect("https://github-dot-knative-tests.appspot.com" + self.request.path_qs) - -app = webapp2.WSGIApplication([(r'/.*', GitHubRedirect),], debug=True, config={}) diff --git a/vendor/github.com/knative/test-infra/ci/prow/Makefile b/vendor/github.com/knative/test-infra/ci/prow/Makefile deleted file mode 100644 index 9b6fcfbea17..00000000000 --- a/vendor/github.com/knative/test-infra/ci/prow/Makefile +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright 2018 The Knative Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -CLUSTER ?= prow -PROJECT ?= knative-tests -ZONE ?= us-central1-f -JOB_NAMESPACE ?= test-pods - -PROW_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST)))) - -get-cluster-credentials: - gcloud container clusters get-credentials "$(CLUSTER)" --project="$(PROJECT)" --zone="$(ZONE)" - -update-config: get-cluster-credentials - kubectl create configmap config --from-file=config.yaml=config.yaml --dry-run -o yaml | kubectl replace configmap config -f - - -update-plugins: get-cluster-credentials - kubectl create configmap plugins --from-file=plugins.yaml=plugins.yaml --dry-run -o yaml | kubectl replace configmap plugins -f - - -update-boskos: get-cluster-credentials - kubectl apply -f boskos/config.yaml - -update-boskos-config: get-cluster-credentials - kubectl create configmap resources --from-file=config=boskos/resources.yaml --dry-run -o yaml | kubectl --namespace="$(JOB_NAMESPACE)" replace configmap resources -f - - -update-cluster: get-cluster-credentials - kubectl apply -f cluster.yaml - -test: - bazel run @k8s//prow/cmd/config -- --plugin-config=$(PROW_DIR)/plugins.yaml - bazel run @k8s//prow/cmd/config -- --config-path=$(PROW_DIR)/config.yaml diff --git a/vendor/github.com/knative/test-infra/ci/prow/README.md b/vendor/github.com/knative/test-infra/ci/prow/README.md deleted file mode 100644 index 04fd12e6bb2..00000000000 --- a/vendor/github.com/knative/test-infra/ci/prow/README.md +++ /dev/null @@ -1,10 +0,0 @@ -# Prow config - -This directory contains the config for our [Prow](https://github.com/kubernetes/test-infra/tree/master/prow) instance. - -* `boskos` Configuration for the Boskos instance. -* `Makefile` Commands to interact with the Prow instance regarding updates. -* `cluster.yaml` Configuration of the Prow cluster. -* `config.yaml` Configuration of the Prow jobs. -* `config_start.yaml` Initial, empty configuration for Prow. 
-* `plugins.yaml` Configuration of the Prow plugins. diff --git a/vendor/github.com/knative/test-infra/ci/prow/boskos/README.md b/vendor/github.com/knative/test-infra/ci/prow/boskos/README.md deleted file mode 100644 index 8e6e90ab6b1..00000000000 --- a/vendor/github.com/knative/test-infra/ci/prow/boskos/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# Boskos config - -This directory contains the config for our [Boskos](https://github.com/kubernetes/test-infra/tree/master/boskos) instance. - -* `config.yaml` Boskos configuration. -* `resources.yaml` Pool of projects used by Boskos. diff --git a/vendor/github.com/knative/test-infra/ci/prow/boskos/config.yaml b/vendor/github.com/knative/test-infra/ci/prow/boskos/config.yaml deleted file mode 100644 index a444d6cff8b..00000000000 --- a/vendor/github.com/knative/test-infra/ci/prow/boskos/config.yaml +++ /dev/null @@ -1,152 +0,0 @@ -# Copyright 2018 The Knative Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -# Boskos deployment for Knative Prow instance ---- -apiVersion: v1 -kind: PersistentVolume -metadata: - labels: - app: boskos - name: boskos-storage - namespace: test-pods -spec: - claimRef: - name: boskos-volume-boskos-0 - namespace: test-pods - capacity: - storage: 1Gi - accessModes: - - ReadWriteOnce - persistentVolumeReclaimPolicy: Retain - gcePersistentDisk: - pdName: boskos-storage - fsType: ext4 ---- -# Start of StatefulSet -apiVersion: apps/v1beta1 -kind: StatefulSet -metadata: - name: boskos - namespace: test-pods -spec: - serviceName: "boskos" - replicas: 1 # one canonical source of resources - template: - metadata: - labels: - app: boskos - namespace: test-pods - spec: - serviceAccountName: "boskos" - terminationGracePeriodSeconds: 30 - containers: - - name: boskos - image: gcr.io/k8s-testimages/boskos:v20180405-12e892d69 - args: - - --storage=/store/boskos.json - - --config=/etc/config/config - - --namespace=test-pods - ports: - - containerPort: 8080 - protocol: TCP - volumeMounts: - - name: boskos-volume - mountPath: /store - - name: boskos-config - mountPath: /etc/config - readOnly: true - volumes: - - name: boskos-config - configMap: - name: resources - volumeClaimTemplates: - - metadata: - name: boskos-volume - spec: - accessModes: [ "ReadWriteOnce" ] - storageClassName: boskos - resources: - requests: - storage: 1Gi ---- -apiVersion: v1 -kind: Service -metadata: - name: boskos - namespace: test-pods -spec: - selector: - app: boskos - ports: - - name: default - protocol: TCP - port: 80 - targetPort: 8080 ---- -# Janitor -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: boskos-janitor - labels: - app: boskos-janitor - namespace: test-pods -spec: - replicas: 3 # 3 distributed janitor instances - template: - metadata: - labels: - app: boskos-janitor - spec: - serviceAccountName: "boskos" - terminationGracePeriodSeconds: 300 - containers: - - name: boskos-janitor - image: gcr.io/k8s-testimages/janitor:v20180619-83c62c891 - args: - 
- --service-account=/etc/service-account/service-account.json - - --resource-type=gke-project - - --pool-size=10 - volumeMounts: - - mountPath: /etc/service-account - name: service - readOnly: true - volumes: - - name: service - secret: - secretName: service-account ---- -# Reaper -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: boskos-reaper - labels: - app: boskos-reaper - namespace: test-pods -spec: - replicas: 1 # one canonical source of resources - template: - metadata: - labels: - app: boskos-reaper - spec: - serviceAccountName: "boskos" - terminationGracePeriodSeconds: 30 - containers: - - name: boskos-reaper - image: gcr.io/k8s-testimages/reaper:v20180402-43203f868 - args: - - --resource-type=gke-project diff --git a/vendor/github.com/knative/test-infra/ci/prow/boskos/config_start.yaml b/vendor/github.com/knative/test-infra/ci/prow/boskos/config_start.yaml deleted file mode 100644 index dc8d66b6295..00000000000 --- a/vendor/github.com/knative/test-infra/ci/prow/boskos/config_start.yaml +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright 2018 The Knative Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -# Initial configuration of Boskos - -apiVersion: v1 -kind: ConfigMap -metadata: - name: resources - namespace: test-pods -data: - resources: "" diff --git a/vendor/github.com/knative/test-infra/ci/prow/boskos/resources.yaml b/vendor/github.com/knative/test-infra/ci/prow/boskos/resources.yaml deleted file mode 100644 index 58f734358f6..00000000000 --- a/vendor/github.com/knative/test-infra/ci/prow/boskos/resources.yaml +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright 2018 The Knative Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -resources: -- names: - - knative-boskos-01 - - knative-boskos-02 - - knative-boskos-03 - - knative-boskos-04 - - knative-boskos-05 - - knative-boskos-06 - - knative-boskos-07 - - knative-boskos-08 - - knative-boskos-09 - - knative-boskos-10 - - knative-boskos-11 - - knative-boskos-12 - - knative-boskos-13 - - knative-boskos-14 - - knative-boskos-15 - - knative-boskos-16 - - knative-boskos-17 - - knative-boskos-18 - - knative-boskos-19 - - knative-boskos-20 - state: dirty - type: gke-project diff --git a/vendor/github.com/knative/test-infra/ci/prow/cluster.yaml b/vendor/github.com/knative/test-infra/ci/prow/cluster.yaml deleted file mode 100644 index 5031611756f..00000000000 --- a/vendor/github.com/knative/test-infra/ci/prow/cluster.yaml +++ /dev/null @@ -1,350 +0,0 @@ -# Copyright 2018 The Knative Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# This file contains Kubernetes YAML files for the most important prow components. 
---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: prowjobs.prow.k8s.io -spec: - group: prow.k8s.io - version: v1 - names: - kind: ProwJob - singular: prowjob - plural: prowjobs - scope: Namespaced - validation: - openAPIV3Schema: - properties: - spec: - properties: - max_concurrency: - type: integer - minimum: 0 - type: - type: string - enum: - - "presubmit" - - "postsubmit" - - "periodic" - - "batch" - status: - properties: - state: - type: string - enum: - - "triggered" - - "pending" - - "success" - - "failure" - - "aborted" - - "error" - anyOf: - - not: - properties: - state: - type: string - enum: - - "success" - - "failure" - - "error" - - "aborted" - - required: - - completionTime ---- -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: hook - labels: - app: hook -spec: - replicas: 2 - strategy: - type: RollingUpdate - rollingUpdate: - maxSurge: 1 - maxUnavailable: 1 - template: - metadata: - labels: - app: hook - spec: - serviceAccountName: "hook" - terminationGracePeriodSeconds: 180 - containers: - - name: hook - image: gcr.io/k8s-prow/hook:v20181023-ca14137 - imagePullPolicy: Always - args: - - --dry-run=false - ports: - - name: http - containerPort: 8888 - volumeMounts: - - name: hmac - mountPath: /etc/webhook - readOnly: true - - name: oauth - mountPath: /etc/github - readOnly: true - - name: config - mountPath: /etc/config - readOnly: true - - name: plugins - mountPath: /etc/plugins - readOnly: true - volumes: - - name: hmac - secret: - secretName: hmac-token - - name: oauth - secret: - secretName: oauth-token - - name: config - configMap: - name: config - - name: plugins - configMap: - name: plugins ---- -apiVersion: v1 -kind: Service -metadata: - name: hook -spec: - selector: - app: hook - ports: - - port: 8888 - type: NodePort ---- -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: plank - labels: - app: plank -spec: - replicas: 1 # Do not scale up. 
- template: - metadata: - labels: - app: plank - spec: - serviceAccountName: "plank" - containers: - - name: plank - image: gcr.io/k8s-prow/plank:v20180709-7109caeb1 - args: - - --dry-run=false - volumeMounts: - - name: oauth - mountPath: /etc/github - readOnly: true - - name: config - mountPath: /etc/config - readOnly: true - volumes: - - name: oauth - secret: - secretName: oauth-token - - name: config - configMap: - name: config ---- -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: sinker - labels: - app: sinker -spec: - replicas: 1 - template: - metadata: - labels: - app: sinker - spec: - serviceAccountName: "sinker" - containers: - - name: sinker - image: gcr.io/k8s-prow/sinker:v20180709-7109caeb1 - volumeMounts: - - name: config - mountPath: /etc/config - readOnly: true - volumes: - - name: config - configMap: - name: config ---- -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: deck - labels: - app: deck -spec: - replicas: 2 - strategy: - type: RollingUpdate - rollingUpdate: - maxSurge: 1 - maxUnavailable: 1 - template: - metadata: - labels: - app: deck - spec: - serviceAccountName: "deck" - terminationGracePeriodSeconds: 30 - containers: - - name: deck - image: gcr.io/k8s-prow/deck:v20180709-7109caeb1 - args: - - --hook-url=http://hook:8888/plugin-help - - --tide-url=http://tide/ - ports: - - name: http - containerPort: 8080 - volumeMounts: - - name: config - mountPath: /etc/config - readOnly: true - volumes: - - name: config - configMap: - name: config ---- -apiVersion: v1 -kind: Service -metadata: - name: deck -spec: - selector: - app: deck - ports: - - port: 80 - targetPort: 8080 - type: NodePort ---- -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: horologium - labels: - app: horologium -spec: - replicas: 1 - template: - metadata: - labels: - app: horologium - spec: - serviceAccountName: "horologium" - terminationGracePeriodSeconds: 30 - containers: - - name: horologium - image: 
gcr.io/k8s-prow/horologium:v20180709-7109caeb1 - volumeMounts: - - name: config - mountPath: /etc/config - readOnly: true - volumes: - - name: config - configMap: - name: config - -# Ingresses - ---- -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: deck-ing - annotations: - kubernetes.io/ingress.class: "gce" - kubernetes.io/ingress.global-static-ip-name: prow-ingress -spec: - tls: - - secretName: tls-secret - hosts: - - prow.knative.dev - rules: - - host: prow.knative.dev - http: - paths: - - path: /* - backend: - serviceName: deck - servicePort: 80 - - path: /hook - backend: - serviceName: hook - servicePort: 8888 - -# Tide - -apiVersion: v1 -kind: Service -metadata: - name: tide -spec: - selector: - app: tide - ports: - - port: 80 - targetPort: 8888 - type: NodePort ---- -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: tide - labels: - app: tide -spec: - replicas: 1 # Do not scale up. - template: - metadata: - labels: - app: tide - spec: - serviceAccountName: "tide" - containers: - - name: tide - image: gcr.io/k8s-prow/tide:v20180808-68cee5a41 - args: - - --dry-run=false - ports: - - name: http - containerPort: 8888 - volumeMounts: - - name: oauth - mountPath: /etc/github - readOnly: true - - name: config - mountPath: /etc/config - readOnly: true - volumes: - - name: oauth - secret: - secretName: oauth-token - - name: config - configMap: - name: config - diff --git a/vendor/github.com/knative/test-infra/ci/prow/config.yaml b/vendor/github.com/knative/test-infra/ci/prow/config.yaml deleted file mode 100644 index 86b707338af..00000000000 --- a/vendor/github.com/knative/test-infra/ci/prow/config.yaml +++ /dev/null @@ -1,2211 +0,0 @@ -# Copyright 2018 The Knative Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -plank: - job_url_template: 'https://gubernator.knative.dev/build/knative-prow/{{if or (eq .Spec.Type "presubmit") (eq .Spec.Type "batch")}}pr-logs/pull{{with .Spec.Refs}}/{{.Org}}_{{.Repo}}{{end}}{{else}}logs{{end}}{{if eq .Spec.Type "presubmit"}}/{{with index .Spec.Refs.Pulls 0}}{{.Number}}{{end}}{{else if eq .Spec.Type "batch"}}/batch{{end}}/{{.Spec.Job}}/{{.Status.BuildID}}/' - report_template: '[Full PR test history](https://gubernator.knative.dev/pr/{{.Spec.Refs.Org}}_{{.Spec.Refs.Repo}}/{{with index .Spec.Refs.Pulls 0}}{{.Number}}{{end}}). [Your PR dashboard](https://gubernator.knative.dev/pr/{{with index .Spec.Refs.Pulls 0}}{{.Author}}{{end}}).' - pod_pending_timeout: 60m - default_decoration_config: - timeout: 7200000000000 # 2h - grace_period: 15000000000 # 15s - utility_images: - clonerefs: "gcr.io/k8s-prow/clonerefs@sha256:b62ba1f379ac19c5ec9ee7bcab14d3f0b3c31cea9cdd4bc491e98e2c5f346c07" - initupload: "gcr.io/k8s-prow/initupload@sha256:58f89f2aae68f7dc46aaf05c7e8204c4f26b53ec9ce30353d1c27ce44a60d121" - entrypoint: "gcr.io/k8s-prow/entrypoint:v20180512-0255926d1" - sidecar: "gcr.io/k8s-prow/sidecar@sha256:8807b2565f4d2699920542fcf890878824b1ede4198d7ff46bca53feb064ed44" - gcs_configuration: - bucket: "knative-prow" - path_strategy: "explicit" - gcs_credentials_secret: "service-account" - -prowjob_namespace: default -pod_namespace: test-pods -log_level: info - -branch-protection: - orgs: - knative: - # Protect all branches in knative - # This means all prow jobs with "always_run" set are required - # to pass before tide can merge the PR. 
- # Currently this is manually enabled by the knative org admins, - # but it's stated here for documentation and reference purposes. - protect: true - # Admins can overrule checks - enforce_admins: false - -tide: - queries: - - repos: - - knative/build - - knative/build-pipeline - - knative/build-templates - - knative/serving - - knative/eventing - - knative/eventing-sources - - knative/docs - - knative/test-infra - - knative/pkg - - knative/caching - labels: - - lgtm - - approved - missingLabels: - - do-not-merge/hold - - do-not-merge/work-in-progress - merge_method: - knative: squash - knative/build-pipeline: rebase - target_url: https://prow.knative.dev/tide.html - -presets: -- labels: - preset-service-account: "true" - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /etc/service-account/service-account.json - volumes: - - name: service - secret: - secretName: service-account - volumeMounts: - - name: service - mountPath: /etc/service-account - readOnly: true -# storage / caching presets -- labels: - preset-bazel-scratch-dir: "true" - env: - - name: TEST_TMPDIR - value: /bazel-scratch/.cache/bazel - volumes: - - name: bazel-scratch - emptyDir: {} - volumeMounts: - - name: bazel-scratch - mountPath: /bazel-scratch/.cache -- labels: - preset-bazel-remote-cache-enabled: "false" - env: - - name: BAZEL_REMOTE_CACHE_ENABLED - value: "false" -# docker-in-docker presets -- labels: - preset-dind-enabled: "true" - env: - - name: DOCKER_IN_DOCKER_ENABLED - value: "true" - volumes: - - name: docker-graph - emptyDir: {} - volumeMounts: - - name: docker-graph - mountPath: /docker-graph - -presubmits: - knative/serving: - - name: pull-knative-serving-build-tests - agent: kubernetes - context: pull-knative-serving-build-tests - always_run: true - rerun_command: "/test pull-knative-serving-build-tests" - trigger: "(?m)^/test (all|pull-knative-serving-build-tests),?(\\s+|$)" - labels: - preset-service-account: "true" - preset-bazel-scratch-dir: "true" - 
preset-bazel-remote-cache-enabled: "true" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:latest - imagePullPolicy: Always - args: - - "--scenario=kubernetes_execute_bazel" - - "--clean" - - "--job=$(JOB_NAME)" - - "--repo=github.com/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)" - - "--root=/go/src" - - "--service-account=/etc/service-account/service-account.json" - - "--upload=gs://knative-prow/pr-logs" - - "--" # end bootstrap args, scenario args below - - "--" # end kubernetes_execute_bazel flags (consider following flags as text) - - "./test/presubmit-tests.sh" - - "--build-tests" - # Bazel needs privileged mode in order to sandbox builds. - securityContext: - privileged: true - resources: - requests: - memory: "1Gi" - - - name: pull-knative-serving-unit-tests - agent: kubernetes - context: pull-knative-serving-unit-tests - always_run: true - rerun_command: "/test pull-knative-serving-unit-tests" - trigger: "(?m)^/test (all|pull-knative-serving-unit-tests),?(\\s+|$)" - labels: - preset-service-account: "true" - preset-bazel-scratch-dir: "true" - preset-bazel-remote-cache-enabled: "true" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:latest - imagePullPolicy: Always - args: - - "--scenario=kubernetes_execute_bazel" - - "--clean" - - "--job=$(JOB_NAME)" - - "--repo=github.com/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)" - - "--root=/go/src" - - "--service-account=/etc/service-account/service-account.json" - - "--upload=gs://knative-prow/pr-logs" - - "--" # end bootstrap args, scenario args below - - "--" # end kubernetes_execute_bazel flags (consider following flags as text) - - "./test/presubmit-tests.sh" - - "--unit-tests" - # Bazel needs privileged mode in order to sandbox builds. 
- securityContext: - privileged: true - resources: - requests: - memory: "1Gi" - - - name: pull-knative-serving-integration-tests - agent: kubernetes - context: pull-knative-serving-integration-tests - always_run: true - rerun_command: "/test pull-knative-serving-integration-tests" - trigger: "(?m)^/test (all|pull-knative-serving-integration-tests),?(\\s+|$)" - labels: - preset-service-account: "true" - preset-bazel-scratch-dir: "true" - preset-bazel-remote-cache-enabled: "true" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:latest - imagePullPolicy: Always - args: - - "--scenario=kubernetes_execute_bazel" - - "--clean" - - "--job=$(JOB_NAME)" - - "--repo=github.com/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)" - - "--root=/go/src" - - "--service-account=/etc/service-account/service-account.json" - - "--upload=gs://knative-prow/pr-logs" - - "--" # end bootstrap args, scenario args below - - "--" # end kubernetes_execute_bazel flags (consider following flags as text) - - "./test/presubmit-tests.sh" - - "--integration-tests" - # Bazel needs privileged mode in order to sandbox builds. - securityContext: - privileged: true - resources: - requests: - memory: "1Gi" - - - name: pull-knative-serving-go-coverage - labels: - preset-service-account: "true" - agent: kubernetes - context: pull-knative-serving-go-coverage - always_run: true - rerun_command: "/test pull-knative-serving-go-coverage" - trigger: "(?m)^/test (all|pull-knative-serving-go-coverage),?(\\s+|$)" - optional: true - decorate: true - clone_uri: "https://github.com/knative/serving.git" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/coverage:latest - imagePullPolicy: Always - command: - - "/coverage" - args: - - "--postsubmit-gcs-bucket=knative-prow" - - "--postsubmit-job-name=post-knative-serving-go-coverage" - - "--artifacts=$(ARTIFACTS)" - - "--profile-name=coverage_profile.txt" - - "--cov-target=." 
- - "--cov-threshold-percentage=50" - - "--github-token=/etc/github-token/token" - volumeMounts: - - name: github-token - mountPath: /etc/github-token - readOnly: true - volumes: - - name: github-token - secret: - secretName: covbot-token - - - name: pull-knative-serving-go-coverage-dev - labels: - preset-service-account: "true" - agent: kubernetes - context: pull-knative-serving-go-coverage-dev - always_run: false - rerun_command: "/test pull-knative-serving-go-coverage-dev" - trigger: "(?m)^/test (pull-knative-serving-go-coverage-dev),?(\\s+|$)" - optional: true - decorate: true - clone_uri: "https://github.com/knative/serving.git" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/coverage-dev:latest-dev - imagePullPolicy: Always - command: - - "/coverage" - args: - - "--postsubmit-gcs-bucket=knative-prow" - - "--postsubmit-job-name=post-knative-serving-go-coverage" - - "--profile-name=coverage_profile.txt" - - "--artifacts=$(ARTIFACTS)" - - "--cov-target=." - - "--cov-threshold-percentage=81" - - "--github-token=/etc/github-token/token" - volumeMounts: - - name: github-token - mountPath: /etc/github-token - readOnly: true - volumes: - - name: github-token - secret: - secretName: covbot-token - - knative/build: - - name: pull-knative-build-build-tests - agent: kubernetes - context: pull-knative-build-build-tests - always_run: true - rerun_command: "/test pull-knative-build-build-tests" - trigger: "(?m)^/test (all|pull-knative-build-build-tests),?(\\s+|$)" - labels: - preset-service-account: "true" - preset-bazel-scratch-dir: "true" - preset-bazel-remote-cache-enabled: "true" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:latest - imagePullPolicy: Always - args: - - "--scenario=kubernetes_execute_bazel" - - "--clean" - - "--job=$(JOB_NAME)" - - "--repo=github.com/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)" - - "--root=/go/src" - - "--service-account=/etc/service-account/service-account.json" - - 
"--upload=gs://knative-prow/pr-logs" - - "--" # end bootstrap args, scenario args below - - "--" # end kubernetes_execute_bazel flags (consider following flags as text) - - "./test/presubmit-tests.sh" - - "--build-tests" - # Bazel needs privileged mode in order to sandbox builds. - securityContext: - privileged: true - resources: - requests: - memory: "1Gi" - - - name: pull-knative-build-unit-tests - agent: kubernetes - context: pull-knative-build-unit-tests - always_run: true - rerun_command: "/test pull-knative-build-unit-tests" - trigger: "(?m)^/test (all|pull-knative-build-unit-tests),?(\\s+|$)" - labels: - preset-service-account: "true" - preset-bazel-scratch-dir: "true" - preset-bazel-remote-cache-enabled: "true" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:latest - imagePullPolicy: Always - args: - - "--scenario=kubernetes_execute_bazel" - - "--clean" - - "--job=$(JOB_NAME)" - - "--repo=github.com/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)" - - "--root=/go/src" - - "--service-account=/etc/service-account/service-account.json" - - "--upload=gs://knative-prow/pr-logs" - - "--" # end bootstrap args, scenario args below - - "--" # end kubernetes_execute_bazel flags (consider following flags as text) - - "./test/presubmit-tests.sh" - - "--unit-tests" - # Bazel needs privileged mode in order to sandbox builds. 
- securityContext: - privileged: true - resources: - requests: - memory: "1Gi" - - - name: pull-knative-build-integration-tests - agent: kubernetes - context: pull-knative-build-integration-tests - always_run: true - rerun_command: "/test pull-knative-build-integration-tests" - trigger: "(?m)^/test (all|pull-knative-build-integration-tests),?(\\s+|$)" - labels: - preset-service-account: "true" - preset-bazel-scratch-dir: "true" - preset-bazel-remote-cache-enabled: "true" - preset-dind-enabled: "true" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:latest - imagePullPolicy: Always - args: - - "--scenario=kubernetes_execute_bazel" - - "--clean" - - "--job=$(JOB_NAME)" - - "--repo=github.com/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)" - - "--root=/go/src" - - "--service-account=/etc/service-account/service-account.json" - - "--upload=gs://knative-prow/pr-logs" - - "--" # end bootstrap args, scenario args below - - "--" # end kubernetes_execute_bazel flags (consider following flags as text) - - "./test/presubmit-tests.sh" - - "--integration-tests" - # Bazel needs privileged mode in order to sandbox builds. - securityContext: - privileged: true - resources: - requests: - memory: "1Gi" - - - name: pull-knative-build-go-coverage - labels: - preset-service-account: "true" - agent: kubernetes - context: pull-knative-build-go-coverage - always_run: true - rerun_command: "/test pull-knative-build-go-coverage" - trigger: "(?m)^/test (all|pull-knative-build-go-coverage),?(\\s+|$)" - optional: true - decorate: true - clone_uri: "https://github.com/knative/build.git" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/coverage:latest - imagePullPolicy: Always - command: - - "/coverage" - args: - - "--postsubmit-gcs-bucket=knative-prow" - - "--postsubmit-job-name=post-knative-build-go-coverage" - - "--artifacts=$(ARTIFACTS)" - - "--profile-name=coverage_profile.txt" - - "--cov-target=." 
- - "--cov-threshold-percentage=50" - - "--github-token=/etc/github-token/token" - volumeMounts: - - name: github-token - mountPath: /etc/github-token - readOnly: true - volumes: - - name: github-token - secret: - secretName: covbot-token - - knative/build-pipeline: - - name: pull-knative-build-pipeline-build-tests - agent: kubernetes - context: pull-knative-build-pipeline-build-tests - always_run: true - rerun_command: "/test pull-knative-build-pipeline-build-tests" - trigger: "(?m)^/test (all|pull-knative-build-pipeline-build-tests),?(\\s+|$)" - labels: - preset-service-account: "true" - preset-bazel-scratch-dir: "true" - preset-bazel-remote-cache-enabled: "true" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:latest - imagePullPolicy: Always - args: - - "--scenario=kubernetes_execute_bazel" - - "--clean" - - "--job=$(JOB_NAME)" - - "--repo=github.com/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)" - - "--root=/go/src" - - "--service-account=/etc/service-account/service-account.json" - - "--upload=gs://knative-prow/pr-logs" - - "--" # end bootstrap args, scenario args below - - "--" # end kubernetes_execute_bazel flags (consider following flags as text) - - "./test/presubmit-tests.sh" - - "--build-tests" - # Bazel needs privileged mode in order to sandbox builds. 
- securityContext: - privileged: true - resources: - requests: - memory: "1Gi" - - - name: pull-knative-build-pipeline-unit-tests - agent: kubernetes - context: pull-knative-build-pipeline-unit-tests - always_run: true - rerun_command: "/test pull-knative-build-pipeline-unit-tests" - trigger: "(?m)^/test (all|pull-knative-build-pipeline-unit-tests),?(\\s+|$)" - labels: - preset-service-account: "true" - preset-bazel-scratch-dir: "true" - preset-bazel-remote-cache-enabled: "true" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:latest - imagePullPolicy: Always - args: - - "--scenario=kubernetes_execute_bazel" - - "--clean" - - "--job=$(JOB_NAME)" - - "--repo=github.com/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)" - - "--root=/go/src" - - "--service-account=/etc/service-account/service-account.json" - - "--upload=gs://knative-prow/pr-logs" - - "--" # end bootstrap args, scenario args below - - "--" # end kubernetes_execute_bazel flags (consider following flags as text) - - "./test/presubmit-tests.sh" - - "--unit-tests" - # Bazel needs privileged mode in order to sandbox builds. 
- securityContext: - privileged: true - resources: - requests: - memory: "1Gi" - - - name: pull-knative-build-pipeline-integration-tests - agent: kubernetes - context: pull-knative-build-pipeline-integration-tests - always_run: true - rerun_command: "/test pull-knative-build-pipeline-integration-tests" - trigger: "(?m)^/test (all|pull-knative-build-pipeline-integration-tests),?(\\s+|$)" - labels: - preset-service-account: "true" - preset-bazel-scratch-dir: "true" - preset-bazel-remote-cache-enabled: "true" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:latest - imagePullPolicy: Always - args: - - "--scenario=kubernetes_execute_bazel" - - "--clean" - - "--job=$(JOB_NAME)" - - "--repo=github.com/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)" - - "--root=/go/src" - - "--service-account=/etc/service-account/service-account.json" - - "--upload=gs://knative-prow/pr-logs" - - "--" # end bootstrap args, scenario args below - - "--" # end kubernetes_execute_bazel flags (consider following flags as text) - - "./test/presubmit-tests.sh" - - "--integration-tests" - # Bazel needs privileged mode in order to sandbox builds. 
- securityContext: - privileged: true - resources: - requests: - memory: "1Gi" - - - name: pull-knative-build-pipeline-go-coverage - labels: - preset-service-account: "true" - agent: kubernetes - context: pull-knative-build-pipeline-go-coverage - always_run: true - rerun_command: "/test pull-knative-build-pipeline-go-coverage" - trigger: "(?m)^/test (all|pull-knative-build-pipeline-go-coverage),?(\\s+|$)" - optional: true - decorate: true - clone_uri: "https://github.com/knative/build-pipeline.git" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/coverage:latest - imagePullPolicy: Always - command: - - "/coverage" - args: - - "--postsubmit-gcs-bucket=knative-prow" - - "--postsubmit-job-name=post-knative-build-pipeline-go-coverage" - - "--artifacts=$(ARTIFACTS)" - - "--profile-name=coverage_profile.txt" - - "--cov-target=." - - "--cov-threshold-percentage=50" - - "--github-token=/etc/github-token/token" - volumeMounts: - - name: github-token - mountPath: /etc/github-token - readOnly: true - volumes: - - name: github-token - secret: - secretName: covbot-token - - knative/eventing: - - name: pull-knative-eventing-build-tests - agent: kubernetes - context: pull-knative-eventing-build-tests - always_run: true - rerun_command: "/test pull-knative-eventing-build-tests" - trigger: "(?m)^/test (all|pull-knative-eventing-build-tests),?(\\s+|$)" - labels: - preset-service-account: "true" - preset-bazel-scratch-dir: "true" - preset-bazel-remote-cache-enabled: "true" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:latest - imagePullPolicy: Always - args: - - "--scenario=kubernetes_execute_bazel" - - "--clean" - - "--job=$(JOB_NAME)" - - "--repo=github.com/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)" - - "--root=/go/src" - - "--service-account=/etc/service-account/service-account.json" - - "--upload=gs://knative-prow/pr-logs" - - "--" # end bootstrap args, scenario args below - - "--" # end kubernetes_execute_bazel flags (consider 
following flags as text) - - "./test/presubmit-tests.sh" - - "--build-tests" - # Bazel needs privileged mode in order to sandbox builds. - securityContext: - privileged: true - resources: - requests: - memory: "1Gi" - - - name: pull-knative-eventing-unit-tests - agent: kubernetes - context: pull-knative-eventing-unit-tests - always_run: true - rerun_command: "/test pull-knative-eventing-unit-tests" - trigger: "(?m)^/test (all|pull-knative-eventing-unit-tests),?(\\s+|$)" - labels: - preset-service-account: "true" - preset-bazel-scratch-dir: "true" - preset-bazel-remote-cache-enabled: "true" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:latest - imagePullPolicy: Always - args: - - "--scenario=kubernetes_execute_bazel" - - "--clean" - - "--job=$(JOB_NAME)" - - "--repo=github.com/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)" - - "--root=/go/src" - - "--service-account=/etc/service-account/service-account.json" - - "--upload=gs://knative-prow/pr-logs" - - "--" # end bootstrap args, scenario args below - - "--" # end kubernetes_execute_bazel flags (consider following flags as text) - - "./test/presubmit-tests.sh" - - "--unit-tests" - # Bazel needs privileged mode in order to sandbox builds. 
- securityContext: - privileged: true - resources: - requests: - memory: "1Gi" - - - name: pull-knative-eventing-integration-tests - agent: kubernetes - context: pull-knative-eventing-integration-tests - always_run: true - rerun_command: "/test pull-knative-eventing-integration-tests" - trigger: "(?m)^/test (all|pull-knative-eventing-integration-tests),?(\\s+|$)" - labels: - preset-service-account: "true" - preset-bazel-scratch-dir: "true" - preset-bazel-remote-cache-enabled: "true" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:latest - imagePullPolicy: Always - args: - - "--scenario=kubernetes_execute_bazel" - - "--clean" - - "--job=$(JOB_NAME)" - - "--repo=github.com/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)" - - "--root=/go/src" - - "--service-account=/etc/service-account/service-account.json" - - "--upload=gs://knative-prow/pr-logs" - - "--" # end bootstrap args, scenario args below - - "--" # end kubernetes_execute_bazel flags (consider following flags as text) - - "./test/presubmit-tests.sh" - - "--integration-tests" - # Bazel needs privileged mode in order to sandbox builds. - securityContext: - privileged: true - resources: - requests: - memory: "1Gi" - - - name: pull-knative-eventing-go-coverage - labels: - preset-service-account: "true" - agent: kubernetes - context: pull-knative-eventing-go-coverage - always_run: true - rerun_command: "/test pull-knative-eventing-go-coverage" - trigger: "(?m)^/test (all|pull-knative-eventing-go-coverage),?(\\s+|$)" - optional: true - decorate: true - clone_uri: "https://github.com/knative/eventing.git" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/coverage:latest - imagePullPolicy: Always - command: - - "/coverage" - args: - - "--postsubmit-gcs-bucket=knative-prow" - - "--postsubmit-job-name=post-knative-eventing-go-coverage" - - "--artifacts=$(ARTIFACTS)" - - "--profile-name=coverage_profile.txt" - - "--cov-target=." 
- - "--cov-threshold-percentage=50" - - "--github-token=/etc/github-token/token" - volumeMounts: - - name: github-token - mountPath: /etc/github-token - readOnly: true - volumes: - - name: github-token - secret: - secretName: covbot-token - - knative/eventing-sources: - - name: pull-knative-eventing-sources-build-tests - agent: kubernetes - context: pull-knative-eventing-sources-build-tests - always_run: true - rerun_command: "/test pull-knative-eventing-sources-build-tests" - trigger: "(?m)^/test (all|pull-knative-eventing-sources-build-tests),?(\\s+|$)" - labels: - preset-service-account: "true" - preset-bazel-scratch-dir: "true" - preset-bazel-remote-cache-enabled: "true" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:latest - imagePullPolicy: Always - args: - - "--scenario=kubernetes_execute_bazel" - - "--clean" - - "--job=$(JOB_NAME)" - - "--repo=github.com/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)" - - "--root=/go/src" - - "--service-account=/etc/service-account/service-account.json" - - "--upload=gs://knative-prow/pr-logs" - - "--" # end bootstrap args, scenario args below - - "--" # end kubernetes_execute_bazel flags (consider following flags as text) - - "./test/presubmit-tests.sh" - - "--build-tests" - # Bazel needs privileged mode in order to sandbox builds. 
- securityContext: - privileged: true - resources: - requests: - memory: "1Gi" - - - name: pull-knative-eventing-sources-unit-tests - agent: kubernetes - context: pull-knative-eventing-sources-unit-tests - always_run: true - rerun_command: "/test pull-knative-eventing-sources-unit-tests" - trigger: "(?m)^/test (all|pull-knative-eventing-sources-unit-tests),?(\\s+|$)" - labels: - preset-service-account: "true" - preset-bazel-scratch-dir: "true" - preset-bazel-remote-cache-enabled: "true" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:latest - imagePullPolicy: Always - args: - - "--scenario=kubernetes_execute_bazel" - - "--clean" - - "--job=$(JOB_NAME)" - - "--repo=github.com/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)" - - "--root=/go/src" - - "--service-account=/etc/service-account/service-account.json" - - "--upload=gs://knative-prow/pr-logs" - - "--" # end bootstrap args, scenario args below - - "--" # end kubernetes_execute_bazel flags (consider following flags as text) - - "./test/presubmit-tests.sh" - - "--unit-tests" - # Bazel needs privileged mode in order to sandbox builds. 
- securityContext: - privileged: true - resources: - requests: - memory: "1Gi" - - - name: pull-knative-eventing-sources-integration-tests - agent: kubernetes - context: pull-knative-eventing-sources-integration-tests - always_run: true - rerun_command: "/test pull-knative-eventing-sources-integration-tests" - trigger: "(?m)^/test (all|pull-knative-eventing-sources-integration-tests),?(\\s+|$)" - labels: - preset-service-account: "true" - preset-bazel-scratch-dir: "true" - preset-bazel-remote-cache-enabled: "true" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:latest - imagePullPolicy: Always - args: - - "--scenario=kubernetes_execute_bazel" - - "--clean" - - "--job=$(JOB_NAME)" - - "--repo=github.com/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)" - - "--root=/go/src" - - "--service-account=/etc/service-account/service-account.json" - - "--upload=gs://knative-prow/pr-logs" - - "--" # end bootstrap args, scenario args below - - "--" # end kubernetes_execute_bazel flags (consider following flags as text) - - "./test/presubmit-tests.sh" - - "--integration-tests" - # Bazel needs privileged mode in order to sandbox builds. 
- securityContext: - privileged: true - resources: - requests: - memory: "1Gi" - - - name: pull-knative-eventing-sources-go-coverage - labels: - preset-service-account: "true" - agent: kubernetes - context: pull-knative-eventing-sources-go-coverage - always_run: true - rerun_command: "/test pull-knative-eventing-sources-go-coverage" - trigger: "(?m)^/test (all|pull-knative-eventing-sources-go-coverage),?(\\s+|$)" - optional: true - decorate: true - clone_uri: "https://github.com/knative/eventing-sources.git" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/coverage:latest - imagePullPolicy: Always - command: - - "/coverage" - args: - - "--postsubmit-gcs-bucket=knative-prow" - - "--postsubmit-job-name=post-knative-eventing-sources-go-coverage" - - "--artifacts=$(ARTIFACTS)" - - "--profile-name=coverage_profile.txt" - - "--cov-target=." - - "--cov-threshold-percentage=50" - - "--github-token=/etc/github-token/token" - volumeMounts: - - name: github-token - mountPath: /etc/github-token - readOnly: true - volumes: - - name: github-token - secret: - secretName: covbot-token - - knative/docs: - - name: pull-knative-docs-build-tests - agent: kubernetes - context: pull-knative-docs-build-tests - always_run: true - rerun_command: "/test pull-knative-docs-build-tests" - trigger: "(?m)^/test (all|pull-knative-docs-build-tests),?(\\s+|$)" - labels: - preset-service-account: "true" - preset-bazel-scratch-dir: "true" - preset-bazel-remote-cache-enabled: "true" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:latest - imagePullPolicy: Always - args: - - "--scenario=kubernetes_execute_bazel" - - "--clean" - - "--job=$(JOB_NAME)" - - "--repo=github.com/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)" - - "--root=/go/src" - - "--service-account=/etc/service-account/service-account.json" - - "--upload=gs://knative-prow/pr-logs" - - "--" # end bootstrap args, scenario args below - - "--" # end kubernetes_execute_bazel flags (consider following flags 
as text) - - "./test/presubmit-tests.sh" - - "--build-tests" - # Bazel needs privileged mode in order to sandbox builds. - securityContext: - privileged: true - resources: - requests: - memory: "1Gi" - - - name: pull-knative-docs-unit-tests - agent: kubernetes - context: pull-knative-docs-unit-tests - always_run: true - rerun_command: "/test pull-knative-docs-unit-tests" - trigger: "(?m)^/test (all|pull-knative-docs-unit-tests),?(\\s+|$)" - labels: - preset-service-account: "true" - preset-bazel-scratch-dir: "true" - preset-bazel-remote-cache-enabled: "true" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:latest - imagePullPolicy: Always - args: - - "--scenario=kubernetes_execute_bazel" - - "--clean" - - "--job=$(JOB_NAME)" - - "--repo=github.com/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)" - - "--root=/go/src" - - "--service-account=/etc/service-account/service-account.json" - - "--upload=gs://knative-prow/pr-logs" - - "--" # end bootstrap args, scenario args below - - "--" # end kubernetes_execute_bazel flags (consider following flags as text) - - "./test/presubmit-tests.sh" - - "--unit-tests" - # Bazel needs privileged mode in order to sandbox builds. 
- securityContext: - privileged: true - resources: - requests: - memory: "1Gi" - - - name: pull-knative-docs-integration-tests - agent: kubernetes - context: pull-knative-docs-integration-tests - always_run: true - rerun_command: "/test pull-knative-docs-integration-tests" - trigger: "(?m)^/test (all|pull-knative-docs-integration-tests),?(\\s+|$)" - labels: - preset-service-account: "true" - preset-bazel-scratch-dir: "true" - preset-bazel-remote-cache-enabled: "true" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:latest - imagePullPolicy: Always - args: - - "--scenario=kubernetes_execute_bazel" - - "--clean" - - "--job=$(JOB_NAME)" - - "--repo=github.com/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)" - - "--root=/go/src" - - "--service-account=/etc/service-account/service-account.json" - - "--upload=gs://knative-prow/pr-logs" - - "--" # end bootstrap args, scenario args below - - "--" # end kubernetes_execute_bazel flags (consider following flags as text) - - "./test/presubmit-tests.sh" - - "--integration-tests" - # Bazel needs privileged mode in order to sandbox builds. - securityContext: - privileged: true - resources: - requests: - memory: "1Gi" - - - name: pull-knative-docs-go-coverage - labels: - preset-service-account: "true" - agent: kubernetes - context: pull-knative-docs-go-coverage - always_run: true - rerun_command: "/test pull-knative-docs-go-coverage" - trigger: "(?m)^/test (all|pull-knative-docs-go-coverage),?(\\s+|$)" - optional: true - decorate: true - clone_uri: "https://github.com/knative/docs.git" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/coverage:latest - imagePullPolicy: Always - command: - - "/coverage" - args: - - "--postsubmit-gcs-bucket=knative-prow" - - "--postsubmit-job-name=post-knative-docs-go-coverage" - - "--artifacts=$(ARTIFACTS)" - - "--profile-name=coverage_profile.txt" - - "--cov-target=." 
- - "--cov-threshold-percentage=50" - - "--github-token=/etc/github-token/token" - volumeMounts: - - name: github-token - mountPath: /etc/github-token - readOnly: true - volumes: - - name: github-token - secret: - secretName: covbot-token - - knative/build-templates: - - name: pull-knative-build-templates-build-tests - agent: kubernetes - context: pull-knative-build-templates-build-tests - always_run: true - rerun_command: "/test pull-knative-build-templates-build-tests" - trigger: "(?m)^/test (all|pull-knative-build-templates-build-tests),?(\\s+|$)" - labels: - preset-service-account: "true" - preset-bazel-scratch-dir: "true" - preset-bazel-remote-cache-enabled: "true" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:latest - imagePullPolicy: Always - args: - - "--scenario=kubernetes_execute_bazel" - - "--clean" - - "--job=$(JOB_NAME)" - - "--repo=github.com/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)" - - "--root=/go/src" - - "--service-account=/etc/service-account/service-account.json" - - "--upload=gs://knative-prow/pr-logs" - - "--" # end bootstrap args, scenario args below - - "--" # end kubernetes_execute_bazel flags (consider following flags as text) - - "./test/presubmit-tests.sh" - - "--build-tests" - # Bazel needs privileged mode in order to sandbox builds. 
- securityContext: - privileged: true - resources: - requests: - memory: "1Gi" - - - name: pull-knative-build-templates-unit-tests - agent: kubernetes - context: pull-knative-build-templates-unit-tests - always_run: true - rerun_command: "/test pull-knative-build-templates-unit-tests" - trigger: "(?m)^/test (all|pull-knative-build-templates-unit-tests),?(\\s+|$)" - labels: - preset-service-account: "true" - preset-bazel-scratch-dir: "true" - preset-bazel-remote-cache-enabled: "true" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:latest - imagePullPolicy: Always - args: - - "--scenario=kubernetes_execute_bazel" - - "--clean" - - "--job=$(JOB_NAME)" - - "--repo=github.com/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)" - - "--root=/go/src" - - "--service-account=/etc/service-account/service-account.json" - - "--upload=gs://knative-prow/pr-logs" - - "--" # end bootstrap args, scenario args below - - "--" # end kubernetes_execute_bazel flags (consider following flags as text) - - "./test/presubmit-tests.sh" - - "--unit-tests" - # Bazel needs privileged mode in order to sandbox builds. 
- securityContext: - privileged: true - resources: - requests: - memory: "1Gi" - - - name: pull-knative-build-templates-integration-tests - agent: kubernetes - context: pull-knative-build-templates-integration-tests - always_run: true - rerun_command: "/test pull-knative-build-templates-integration-tests" - trigger: "(?m)^/test (all|pull-knative-build-templates-integration-tests),?(\\s+|$)" - labels: - preset-service-account: "true" - preset-bazel-scratch-dir: "true" - preset-bazel-remote-cache-enabled: "true" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:latest - imagePullPolicy: Always - args: - - "--scenario=kubernetes_execute_bazel" - - "--clean" - - "--job=$(JOB_NAME)" - - "--repo=github.com/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)" - - "--root=/go/src" - - "--service-account=/etc/service-account/service-account.json" - - "--upload=gs://knative-prow/pr-logs" - - "--" # end bootstrap args, scenario args below - - "--" # end kubernetes_execute_bazel flags (consider following flags as text) - - "./test/presubmit-tests.sh" - - "--integration-tests" - # Bazel needs privileged mode in order to sandbox builds. 
- securityContext: - privileged: true - resources: - requests: - memory: "1Gi" - - knative/pkg: - - name: pull-knative-pkg-build-tests - agent: kubernetes - context: pull-knative-pkg-build-tests - always_run: true - rerun_command: "/test pull-knative-pkg-build-tests" - trigger: "(?m)^/test (all|pull-knative-pkg-build-tests),?(\\s+|$)" - labels: - preset-service-account: "true" - preset-bazel-scratch-dir: "true" - preset-bazel-remote-cache-enabled: "true" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:latest - imagePullPolicy: Always - args: - - "--scenario=kubernetes_execute_bazel" - - "--clean" - - "--job=$(JOB_NAME)" - - "--repo=github.com/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)" - - "--root=/go/src" - - "--service-account=/etc/service-account/service-account.json" - - "--upload=gs://knative-prow/pr-logs" - - "--" # end bootstrap args, scenario args below - - "--" # end kubernetes_execute_bazel flags (consider following flags as text) - - "./test/presubmit-tests.sh" - - "--build-tests" - # Bazel needs privileged mode in order to sandbox builds. 
- securityContext: - privileged: true - resources: - requests: - memory: "1Gi" - - - name: pull-knative-pkg-unit-tests - agent: kubernetes - context: pull-knative-pkg-unit-tests - always_run: true - rerun_command: "/test pull-knative-pkg-unit-tests" - trigger: "(?m)^/test (all|pull-knative-pkg-unit-tests),?(\\s+|$)" - labels: - preset-service-account: "true" - preset-bazel-scratch-dir: "true" - preset-bazel-remote-cache-enabled: "true" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:latest - imagePullPolicy: Always - args: - - "--scenario=kubernetes_execute_bazel" - - "--clean" - - "--job=$(JOB_NAME)" - - "--repo=github.com/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)" - - "--root=/go/src" - - "--service-account=/etc/service-account/service-account.json" - - "--upload=gs://knative-prow/pr-logs" - - "--" # end bootstrap args, scenario args below - - "--" # end kubernetes_execute_bazel flags (consider following flags as text) - - "./test/presubmit-tests.sh" - - "--unit-tests" - # Bazel needs privileged mode in order to sandbox builds. 
- securityContext: - privileged: true - resources: - requests: - memory: "1Gi" - - - name: pull-knative-pkg-integration-tests - agent: kubernetes - context: pull-knative-pkg-integration-tests - always_run: true - rerun_command: "/test pull-knative-pkg-integration-tests" - trigger: "(?m)^/test (all|pull-knative-pkg-integration-tests),?(\\s+|$)" - labels: - preset-service-account: "true" - preset-bazel-scratch-dir: "true" - preset-bazel-remote-cache-enabled: "true" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:latest - imagePullPolicy: Always - args: - - "--scenario=kubernetes_execute_bazel" - - "--clean" - - "--job=$(JOB_NAME)" - - "--repo=github.com/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)" - - "--root=/go/src" - - "--service-account=/etc/service-account/service-account.json" - - "--upload=gs://knative-prow/pr-logs" - - "--" # end bootstrap args, scenario args below - - "--" # end kubernetes_execute_bazel flags (consider following flags as text) - - "./test/presubmit-tests.sh" - - "--integration-tests" - # Bazel needs privileged mode in order to sandbox builds. - securityContext: - privileged: true - resources: - requests: - memory: "1Gi" - - - name: pull-knative-pkg-go-coverage - labels: - preset-service-account: "true" - agent: kubernetes - context: pull-knative-pkg-go-coverage - always_run: true - rerun_command: "/test pull-knative-pkg-go-coverage" - trigger: "(?m)^/test (all|pull-knative-pkg-go-coverage),?(\\s+|$)" - optional: true - decorate: true - clone_uri: "https://github.com/knative/pkg.git" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/coverage:latest - imagePullPolicy: Always - command: - - "/coverage" - args: - - "--postsubmit-gcs-bucket=knative-prow" - - "--postsubmit-job-name=post-knative-pkg-go-coverage" - - "--artifacts=$(ARTIFACTS)" - - "--profile-name=coverage_profile.txt" - - "--cov-target=." 
- - "--cov-threshold-percentage=50" - - "--github-token=/etc/github-token/token" - volumeMounts: - - name: github-token - mountPath: /etc/github-token - readOnly: true - volumes: - - name: github-token - secret: - secretName: covbot-token - - knative/test-infra: - - name: pull-knative-test-infra-build-tests - agent: kubernetes - context: pull-knative-test-infra-build-tests - always_run: true - rerun_command: "/test pull-knative-test-infra-build-tests" - trigger: "(?m)^/test (all|pull-knative-test-infra-build-tests),?(\\s+|$)" - labels: - preset-service-account: "true" - preset-bazel-scratch-dir: "true" - preset-bazel-remote-cache-enabled: "true" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:latest - imagePullPolicy: Always - args: - - "--scenario=kubernetes_execute_bazel" - - "--clean" - - "--job=$(JOB_NAME)" - - "--repo=github.com/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)" - - "--root=/go/src" - - "--service-account=/etc/service-account/service-account.json" - - "--upload=gs://knative-prow/pr-logs" - - "--" # end bootstrap args, scenario args below - - "--" # end kubernetes_execute_bazel flags (consider following flags as text) - - "./test/presubmit-tests.sh" - - "--build-tests" - # Bazel needs privileged mode in order to sandbox builds. 
- securityContext: - privileged: true - resources: - requests: - memory: "1Gi" - - - name: pull-knative-test-infra-unit-tests - agent: kubernetes - context: pull-knative-test-infra-unit-tests - always_run: true - rerun_command: "/test pull-knative-test-infra-unit-tests" - trigger: "(?m)^/test (all|pull-knative-test-infra-unit-tests),?(\\s+|$)" - labels: - preset-service-account: "true" - preset-bazel-scratch-dir: "true" - preset-bazel-remote-cache-enabled: "true" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:latest - imagePullPolicy: Always - args: - - "--scenario=kubernetes_execute_bazel" - - "--clean" - - "--job=$(JOB_NAME)" - - "--repo=github.com/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)" - - "--root=/go/src" - - "--service-account=/etc/service-account/service-account.json" - - "--upload=gs://knative-prow/pr-logs" - - "--" # end bootstrap args, scenario args below - - "--" # end kubernetes_execute_bazel flags (consider following flags as text) - - "./test/presubmit-tests.sh" - - "--unit-tests" - # Bazel needs privileged mode in order to sandbox builds. 
- securityContext: - privileged: true - resources: - requests: - memory: "1Gi" - - - name: pull-knative-test-infra-integration-tests - agent: kubernetes - context: pull-knative-test-infra-integration-tests - always_run: true - rerun_command: "/test pull-knative-test-infra-integration-tests" - trigger: "(?m)^/test (all|pull-knative-test-infra-integration-tests),?(\\s+|$)" - labels: - preset-service-account: "true" - preset-bazel-scratch-dir: "true" - preset-bazel-remote-cache-enabled: "true" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:latest - imagePullPolicy: Always - args: - - "--scenario=kubernetes_execute_bazel" - - "--clean" - - "--job=$(JOB_NAME)" - - "--repo=github.com/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)" - - "--root=/go/src" - - "--service-account=/etc/service-account/service-account.json" - - "--upload=gs://knative-prow/pr-logs" - - "--" # end bootstrap args, scenario args below - - "--" # end kubernetes_execute_bazel flags (consider following flags as text) - - "./test/presubmit-tests.sh" - - "--integration-tests" - # Bazel needs privileged mode in order to sandbox builds. 
- securityContext: - privileged: true - resources: - requests: - memory: "1Gi" - - knative/caching: - - name: pull-knative-caching-build-tests - agent: kubernetes - context: pull-knative-caching-build-tests - always_run: true - rerun_command: "/test pull-knative-caching-build-tests" - trigger: "(?m)^/test (all|pull-knative-caching-build-tests),?(\\s+|$)" - labels: - preset-service-account: "true" - preset-bazel-scratch-dir: "true" - preset-bazel-remote-cache-enabled: "true" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:latest - imagePullPolicy: Always - args: - - "--scenario=kubernetes_execute_bazel" - - "--clean" - - "--job=$(JOB_NAME)" - - "--repo=github.com/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)" - - "--root=/go/src" - - "--service-account=/etc/service-account/service-account.json" - - "--upload=gs://knative-prow/pr-logs" - - "--" # end bootstrap args, scenario args below - - "--" # end kubernetes_execute_bazel flags (consider following flags as text) - - "./test/presubmit-tests.sh" - - "--build-tests" - # Bazel needs privileged mode in order to sandbox builds. 
- securityContext: - privileged: true - resources: - requests: - memory: "1Gi" - - - name: pull-knative-caching-unit-tests - agent: kubernetes - context: pull-knative-caching-unit-tests - always_run: true - rerun_command: "/test pull-knative-caching-unit-tests" - trigger: "(?m)^/test (all|pull-knative-caching-unit-tests),?(\\s+|$)" - labels: - preset-service-account: "true" - preset-bazel-scratch-dir: "true" - preset-bazel-remote-cache-enabled: "true" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:latest - imagePullPolicy: Always - args: - - "--scenario=kubernetes_execute_bazel" - - "--clean" - - "--job=$(JOB_NAME)" - - "--repo=github.com/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)" - - "--root=/go/src" - - "--service-account=/etc/service-account/service-account.json" - - "--upload=gs://knative-prow/pr-logs" - - "--" # end bootstrap args, scenario args below - - "--" # end kubernetes_execute_bazel flags (consider following flags as text) - - "./test/presubmit-tests.sh" - - "--unit-tests" - # Bazel needs privileged mode in order to sandbox builds. 
- securityContext: - privileged: true - resources: - requests: - memory: "1Gi" - - - name: pull-knative-caching-integration-tests - agent: kubernetes - context: pull-knative-caching-integration-tests - always_run: true - rerun_command: "/test pull-knative-caching-integration-tests" - trigger: "(?m)^/test (all|pull-knative-caching-integration-tests),?(\\s+|$)" - labels: - preset-service-account: "true" - preset-bazel-scratch-dir: "true" - preset-bazel-remote-cache-enabled: "true" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:latest - imagePullPolicy: Always - args: - - "--scenario=kubernetes_execute_bazel" - - "--clean" - - "--job=$(JOB_NAME)" - - "--repo=github.com/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)" - - "--root=/go/src" - - "--service-account=/etc/service-account/service-account.json" - - "--upload=gs://knative-prow/pr-logs" - - "--" # end bootstrap args, scenario args below - - "--" # end kubernetes_execute_bazel flags (consider following flags as text) - - "./test/presubmit-tests.sh" - - "--integration-tests" - # Bazel needs privileged mode in order to sandbox builds. - securityContext: - privileged: true - resources: - requests: - memory: "1Gi" - - - name: pull-knative-caching-go-coverage - labels: - preset-service-account: "true" - agent: kubernetes - context: pull-knative-caching-go-coverage - always_run: true - rerun_command: "/test pull-knative-caching-go-coverage" - trigger: "(?m)^/test (all|pull-knative-caching-go-coverage),?(\\s+|$)" - optional: true - decorate: true - clone_uri: "https://github.com/knative/caching.git" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/coverage:latest - imagePullPolicy: Always - command: - - "/coverage" - args: - - "--postsubmit-gcs-bucket=knative-prow" - - "--postsubmit-job-name=post-knative-caching-go-coverage" - - "--artifacts=$(ARTIFACTS)" - - "--profile-name=coverage_profile.txt" - - "--cov-target=." 
- - "--cov-threshold-percentage=50" - - "--github-token=/etc/github-token/token" - volumeMounts: - - name: github-token - mountPath: /etc/github-token - readOnly: true - volumes: - - name: github-token - secret: - secretName: covbot-token - -periodics: -- cron: "1 * * * *" # Run every hour and one minute - name: ci-knative-serving-continuous - agent: kubernetes - labels: - preset-service-account: "true" - preset-bazel-scratch-dir: "true" - preset-bazel-remote-cache-enabled: "true" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:latest - imagePullPolicy: Always - args: - - "--scenario=kubernetes_execute_bazel" - - "--clean" - - "--job=$(JOB_NAME)" - - "--repo=github.com/knative/serving" - - "--root=/go/src" - - "--service-account=/etc/service-account/service-account.json" - - "--upload=gs://knative-prow/logs" - - "--timeout=50" # Avoid overrun - - "--" # end bootstrap args, scenario args below - - "--" # end kubernetes_execute_bazel flags (consider following flags as text) - - "./test/presubmit-tests.sh" - - "--all-tests" - - "--emit-metrics" - # Bazel needs privileged mode in order to sandbox builds. 
- securityContext: - privileged: true - resources: - requests: - memory: "1Gi" -- cron: "1 8 * * *" # Run at 01:01PST every day (08:01 UTC) - name: ci-knative-serving-release - agent: kubernetes - labels: - preset-service-account: "true" - preset-bazel-scratch-dir: "true" - preset-bazel-remote-cache-enabled: "true" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:latest - imagePullPolicy: Always - args: - - "--scenario=kubernetes_execute_bazel" - - "--clean" - - "--job=$(JOB_NAME)" - - "--repo=github.com/knative/serving" - - "--root=/go/src" - - "--service-account=/etc/service-account/service-account.json" - - "--upload=gs://knative-prow/logs" - - "--timeout=90" # 1.5h - - "--" # end bootstrap args, scenario args below - - "--" # end kubernetes_execute_bazel flags (consider following flags as text) - - "./hack/release.sh" - - "--publish" - - "--tag-release" - # Bazel needs privileged mode in order to sandbox builds. - securityContext: - privileged: true - resources: - requests: - memory: "1Gi" -- cron: "1 9 * * 6" # Run at 02:01PST every Saturday (09:01 UTC) - name: ci-knative-serving-playground - agent: kubernetes - labels: - preset-service-account: "true" - preset-bazel-scratch-dir: "true" - preset-bazel-remote-cache-enabled: "true" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:latest - imagePullPolicy: Always - args: - - "--scenario=kubernetes_execute_bazel" - - "--clean" - - "--job=$(JOB_NAME)" - - "--repo=github.com/knative/serving" - - "--root=/go/src" - - "--service-account=/etc/service-account/service-account.json" - - "--upload=gs://knative-prow/logs" - - "--timeout=90" # 1.5h - - "--" # end bootstrap args, scenario args below - - "--" # end kubernetes_execute_bazel flags (consider following flags as text) - - "./hack/deploy.sh" - - "knative-playground" - # Bazel needs privileged mode in order to sandbox builds. 
- securityContext: - privileged: true - resources: - requests: - memory: "1Gi" -- cron: "5 8 * * *" # Run at 01:05PST every day (08:05 UTC) - name: ci-knative-serving-latency - agent: kubernetes - labels: - preset-service-account: "true" - decorate: true - extra_refs: - - org: knative - repo: serving - base_ref: master - clone_uri: "https://github.com/knative/serving.git" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/metrics:latest - imagePullPolicy: Always - command: - - "/metrics" - args: - - "--source-directory=ci-knative-serving-continuous" - - "--artifacts-dir=$(ARTIFACTS)" - - "--service-account=/etc/service-account/service-account.json" -- cron: "5 8 * * *" # Run at 01:05PST every day (08:05 UTC) - name: ci-knative-serving-api-coverage - agent: kubernetes - labels: - preset-service-account: "true" - decorate: true - extra_refs: - - org: knative - repo: serving - base_ref: master - clone_uri: "https://github.com/knative/serving.git" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/apicoverage:latest - imagePullPolicy: Always - command: - - "/apicoverage" - args: - - "--artifacts-dir=$(ARTIFACTS)" - - "--service-account=/etc/service-account/service-account.json" -- cron: "0 1 * * *" # Run at 01:00 every day - name: ci-knative-serving-go-coverage - agent: kubernetes - decorate: true - extra_refs: - - org: knative - repo: serving - base_ref: master - clone_uri: "https://github.com/knative/serving.git" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/coverage:latest - imagePullPolicy: Always - command: - - "/coverage" - args: - - "--artifacts=$(ARTIFACTS)" - - "--profile-name=coverage_profile.txt" - - "--cov-target=." 
- - "--cov-threshold-percentage=80" -- cron: "0 1 * * *" # Run at 01:00 every day - name: ci-knative-serving-performance - agent: kubernetes - labels: - preset-service-account: "true" - preset-bazel-scratch-dir: "true" - preset-bazel-remote-cache-enabled: "true" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:latest - imagePullPolicy: Always - args: - - "--scenario=kubernetes_execute_bazel" - - "--clean" - - "--job=$(JOB_NAME)" - - "--repo=github.com/knative/serving" - - "--root=/go/src" - - "--service-account=/etc/service-account/service-account.json" - - "--upload=gs://knative-prow/logs" - - "--timeout=50" # Avoid overrun - - "--" # end bootstrap args, scenario args below - - "--" # end kubernetes_execute_bazel flags (consider following flags as text) - - "./test/performance-tests.sh" - # Bazel needs privileged mode in order to sandbox builds. - securityContext: - privileged: true - resources: - requests: - memory: "1Gi" - -- cron: "15 * * * *" # Run every hour and 15 minutes - name: ci-knative-build-continuous - agent: kubernetes - labels: - preset-service-account: "true" - preset-bazel-scratch-dir: "true" - preset-bazel-remote-cache-enabled: "true" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:latest - imagePullPolicy: Always - args: - - "--scenario=kubernetes_execute_bazel" - - "--clean" - - "--job=$(JOB_NAME)" - - "--repo=github.com/knative/build" - - "--root=/go/src" - - "--service-account=/etc/service-account/service-account.json" - - "--upload=gs://knative-prow/logs" - - "--timeout=50" # Avoid overrun - - "--" # end bootstrap args, scenario args below - - "--" # end kubernetes_execute_bazel flags (consider following flags as text) - - "./test/presubmit-tests.sh" - - "--all-tests" - - "--emit-metrics" - # Bazel needs privileged mode in order to sandbox builds. 
- securityContext: - privileged: true - resources: - requests: - memory: "1Gi" -- cron: "31 8 * * *" # Run at 01:31PST every day (08:31 UTC) - name: ci-knative-build-release - agent: kubernetes - labels: - preset-service-account: "true" - preset-bazel-scratch-dir: "true" - preset-bazel-remote-cache-enabled: "true" - preset-dind-enabled: "true" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:latest - imagePullPolicy: Always - args: - - "--scenario=kubernetes_execute_bazel" - - "--clean" - - "--job=$(JOB_NAME)" - - "--repo=github.com/knative/build" - - "--root=/go/src" - - "--service-account=/etc/service-account/service-account.json" - - "--upload=gs://knative-prow/logs" - - "--timeout=90" # 1.5h - - "--" # end bootstrap args, scenario args below - - "--" # end kubernetes_execute_bazel flags (consider following flags as text) - - "./hack/release.sh" - - "--publish" - - "--tag-release" - # Bazel needs privileged mode in order to sandbox builds. - securityContext: - privileged: true - resources: - requests: - memory: "1Gi" -- cron: "5 8 * * *" # Run at 01:05PST every day (08:05 UTC) - name: ci-knative-build-latency - agent: kubernetes - labels: - preset-service-account: "true" - decorate: true - extra_refs: - - org: knative - repo: build - base_ref: master - clone_uri: "https://github.com/knative/build.git" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/metrics:latest - imagePullPolicy: Always - command: - - "/metrics" - args: - - "--source-directory=ci-knative-build-continuous" - - "--artifacts-dir=$(ARTIFACTS)" - - "--service-account=/etc/service-account/service-account.json" -- cron: "0 1 * * *" # Run at 01:00 every day - name: ci-knative-build-go-coverage - agent: kubernetes - decorate: true - extra_refs: - - org: knative - repo: build - base_ref: master - clone_uri: "https://github.com/knative/build.git" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/coverage:latest - imagePullPolicy: Always - 
command: - - "/coverage" - args: - - "--artifacts=$(ARTIFACTS)" - - "--profile-name=coverage_profile.txt" - - "--cov-target=." - - "--cov-threshold-percentage=80" - -- cron: "50 * * * *" # Run every hour and 50 minutes - name: ci-knative-docs-continuous - agent: kubernetes - labels: - preset-service-account: "true" - preset-bazel-scratch-dir: "true" - preset-bazel-remote-cache-enabled: "true" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:latest - imagePullPolicy: Always - args: - - "--scenario=kubernetes_execute_bazel" - - "--clean" - - "--job=$(JOB_NAME)" - - "--repo=github.com/knative/docs" - - "--root=/go/src" - - "--service-account=/etc/service-account/service-account.json" - - "--upload=gs://knative-prow/logs" - - "--timeout=50" # Avoid overrun - - "--" # end bootstrap args, scenario args below - - "--" # end kubernetes_execute_bazel flags (consider following flags as text) - - "./test/presubmit-tests.sh" - - "--all-tests" - # Bazel needs privileged mode in order to sandbox builds. - securityContext: - privileged: true - resources: - requests: - memory: "1Gi" -- cron: "0 1 * * *" # Run at 01:00 every day - name: ci-knative-docs-go-coverage - agent: kubernetes - decorate: true - extra_refs: - - org: knative - repo: docs - base_ref: master - clone_uri: "https://github.com/knative/docs.git" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/coverage:latest - imagePullPolicy: Always - command: - - "/coverage" - args: - - "--artifacts=$(ARTIFACTS)" - - "--profile-name=coverage_profile.txt" - - "--cov-target=." 
- - "--cov-threshold-percentage=80" - -- cron: "30 * * * *" # Run every hour and 30 minutes - name: ci-knative-eventing-continuous - agent: kubernetes - labels: - preset-service-account: "true" - preset-bazel-scratch-dir: "true" - preset-bazel-remote-cache-enabled: "true" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:latest - imagePullPolicy: Always - args: - - "--scenario=kubernetes_execute_bazel" - - "--clean" - - "--job=$(JOB_NAME)" - - "--repo=github.com/knative/eventing" - - "--root=/go/src" - - "--service-account=/etc/service-account/service-account.json" - - "--upload=gs://knative-prow/logs" - - "--timeout=50" # Avoid overrun - - "--" # end bootstrap args, scenario args below - - "--" # end kubernetes_execute_bazel flags (consider following flags as text) - - "./test/presubmit-tests.sh" - - "--all-tests" - # Bazel needs privileged mode in order to sandbox builds. - securityContext: - privileged: true - resources: - requests: - memory: "1Gi" -- cron: "16 9 * * *" # Run at 02:16PST every day (09:16 UTC) - name: ci-knative-eventing-release - agent: kubernetes - labels: - preset-service-account: "true" - preset-bazel-scratch-dir: "true" - preset-bazel-remote-cache-enabled: "true" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:latest - imagePullPolicy: Always - args: - - "--scenario=kubernetes_execute_bazel" - - "--clean" - - "--job=$(JOB_NAME)" - - "--repo=github.com/knative/eventing" - - "--root=/go/src" - - "--service-account=/etc/service-account/service-account.json" - - "--upload=gs://knative-prow/logs" - - "--timeout=90" # 1.5h - - "--" # end bootstrap args, scenario args below - - "--" # end kubernetes_execute_bazel flags (consider following flags as text) - - "./hack/release.sh" - - "--publish" - - "--tag-release" - # Bazel needs privileged mode in order to sandbox builds. 
- securityContext: - privileged: true - resources: - requests: - memory: "1Gi" -- cron: "0 1 * * *" # Run at 01:00 every day - name: ci-knative-eventing-go-coverage - agent: kubernetes - decorate: true - extra_refs: - - org: knative - repo: eventing - base_ref: master - clone_uri: "https://github.com/knative/eventing.git" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/coverage:latest - imagePullPolicy: Always - command: - - "/coverage" - args: - - "--artifacts=$(ARTIFACTS)" - - "--profile-name=coverage_profile.txt" - - "--cov-target=." - - "--cov-threshold-percentage=80" - -- cron: "30 * * * *" # Run every hour and 30 minutes - name: ci-knative-eventing-sources-continuous - agent: kubernetes - labels: - preset-service-account: "true" - preset-bazel-scratch-dir: "true" - preset-bazel-remote-cache-enabled: "true" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:latest - imagePullPolicy: Always - args: - - "--scenario=kubernetes_execute_bazel" - - "--clean" - - "--job=$(JOB_NAME)" - - "--repo=github.com/knative/eventing-sources" - - "--root=/go/src" - - "--service-account=/etc/service-account/service-account.json" - - "--upload=gs://knative-prow/logs" - - "--timeout=50" # Avoid overrun - - "--" # end bootstrap args, scenario args below - - "--" # end kubernetes_execute_bazel flags (consider following flags as text) - - "./test/presubmit-tests.sh" - - "--all-tests" - # Bazel needs privileged mode in order to sandbox builds. 
- securityContext: - privileged: true - resources: - requests: - memory: "1Gi" -- cron: "16 9 * * *" # Run at 02:16PST every day (09:16 UTC) - name: ci-knative-eventing-sources-release - agent: kubernetes - labels: - preset-service-account: "true" - preset-bazel-scratch-dir: "true" - preset-bazel-remote-cache-enabled: "true" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:latest - imagePullPolicy: Always - args: - - "--scenario=kubernetes_execute_bazel" - - "--clean" - - "--job=$(JOB_NAME)" - - "--repo=github.com/knative/eventing-sources" - - "--root=/go/src" - - "--service-account=/etc/service-account/service-account.json" - - "--upload=gs://knative-prow/logs" - - "--timeout=90" # 1.5h - - "--" # end bootstrap args, scenario args below - - "--" # end kubernetes_execute_bazel flags (consider following flags as text) - - "./hack/release.sh" - - "--publish" - - "--tag-release" - # Bazel needs privileged mode in order to sandbox builds. - securityContext: - privileged: true - resources: - requests: - memory: "1Gi" -- cron: "0 1 * * *" # Run at 01:00 every day - name: ci-knative-eventing-sources-go-coverage - agent: kubernetes - decorate: true - extra_refs: - - org: knative - repo: eventing-sources - base_ref: master - clone_uri: "https://github.com/knative/eventing-sources.git" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/coverage:latest - imagePullPolicy: Always - command: - - "/coverage" - args: - - "--artifacts=$(ARTIFACTS)" - - "--profile-name=coverage_profile.txt" - - "--cov-target=." 
- - "--cov-threshold-percentage=80" - -- cron: "40 * * * *" # Run every hour and 40 minutes - name: ci-knative-build-templates-continuous - agent: kubernetes - labels: - preset-service-account: "true" - preset-bazel-scratch-dir: "true" - preset-bazel-remote-cache-enabled: "true" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:latest - imagePullPolicy: Always - args: - - "--scenario=kubernetes_execute_bazel" - - "--clean" - - "--job=$(JOB_NAME)" - - "--repo=github.com/knative/build-templates" - - "--root=/go/src" - - "--service-account=/etc/service-account/service-account.json" - - "--upload=gs://knative-prow/logs" - - "--timeout=50" # Avoid overrun - - "--" # end bootstrap args, scenario args below - - "--" # end kubernetes_execute_bazel flags (consider following flags as text) - - "./test/presubmit-tests.sh" - - "--all-tests" - # Bazel needs privileged mode in order to sandbox builds. - securityContext: - privileged: true - resources: - requests: - memory: "1Gi" - -- cron: "45 * * * *" # Run every hour and 45 minutes - name: ci-knative-pkg-continuous - agent: kubernetes - labels: - preset-service-account: "true" - preset-bazel-scratch-dir: "true" - preset-bazel-remote-cache-enabled: "true" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:latest - imagePullPolicy: Always - args: - - "--scenario=kubernetes_execute_bazel" - - "--clean" - - "--job=$(JOB_NAME)" - - "--repo=github.com/knative/pkg" - - "--root=/go/src" - - "--service-account=/etc/service-account/service-account.json" - - "--upload=gs://knative-prow/logs" - - "--timeout=50" # Avoid overrun - - "--" # end bootstrap args, scenario args below - - "--" # end kubernetes_execute_bazel flags (consider following flags as text) - - "./test/presubmit-tests.sh" - - "--all-tests" - # Bazel needs privileged mode in order to sandbox builds. 
- securityContext: - privileged: true - resources: - requests: - memory: "1Gi" -- cron: "0 1 * * *" # Run at 01:00 every day - name: ci-knative-pkg-go-coverage - agent: kubernetes - decorate: true - extra_refs: - - org: knative - repo: pkg - base_ref: master - clone_uri: "https://github.com/knative/pkg.git" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/coverage:latest - imagePullPolicy: Always - command: - - "/coverage" - args: - - "--artifacts=$(ARTIFACTS)" - - "--profile-name=coverage_profile.txt" - - "--cov-target=." - - "--cov-threshold-percentage=80" - -- cron: "30 * * * *" # Run every hour and 30 minutes - name: ci-knative-caching-continuous - agent: kubernetes - labels: - preset-service-account: "true" - preset-bazel-scratch-dir: "true" - preset-bazel-remote-cache-enabled: "true" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/prow-tests:latest - imagePullPolicy: Always - args: - - "--scenario=kubernetes_execute_bazel" - - "--clean" - - "--job=$(JOB_NAME)" - - "--repo=github.com/knative/caching" - - "--root=/go/src" - - "--service-account=/etc/service-account/service-account.json" - - "--upload=gs://knative-prow/logs" - - "--timeout=50" # Avoid overrun - - "--" # end bootstrap args, scenario args below - - "--" # end kubernetes_execute_bazel flags (consider following flags as text) - - "./test/presubmit-tests.sh" - - "--all-tests" - # Bazel needs privileged mode in order to sandbox builds. 
- securityContext: - privileged: true - resources: - requests: - memory: "1Gi" -- cron: "0 1 * * *" # Run at 01:00 every day - name: ci-knative-caching-go-coverage - agent: kubernetes - decorate: true - extra_refs: - - org: knative - repo: caching - base_ref: master - clone_uri: "https://github.com/knative/caching.git" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/coverage:latest - imagePullPolicy: Always - command: - - "/coverage" - args: - - "--artifacts=$(ARTIFACTS)" - - "--profile-name=coverage_profile.txt" - - "--cov-target=." - - "--cov-threshold-percentage=80" - -postsubmits: - knative/serving: - - name: post-knative-serving-go-coverage - branches: - - master - agent: kubernetes - decorate: true - clone_uri: "https://github.com/knative/serving.git" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/coverage:latest - imagePullPolicy: Always - command: - - "/coverage" - args: - - "--artifacts=$(ARTIFACTS)" - - "--profile-name=coverage_profile.txt" - - "--cov-target=." - - "--cov-threshold-percentage=0" - - name: post-knative-serving-go-coverage-dev - branches: - - master - agent: kubernetes - decorate: true - clone_uri: "https://github.com/knative/serving.git" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/coverage-dev:latest-dev - imagePullPolicy: Always - command: - - "/coverage" - args: - - "--artifacts=$(ARTIFACTS)" - - "--profile-name=coverage_profile.txt" - - "--cov-target=." - - "--cov-threshold-percentage=0" - - knative/build: - - name: post-knative-build-go-coverage - branches: - - master - agent: kubernetes - decorate: true - clone_uri: "https://github.com/knative/build.git" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/coverage:latest - imagePullPolicy: Always - command: - - "/coverage" - args: - - "--artifacts=$(ARTIFACTS)" - - "--profile-name=coverage_profile.txt" - - "--cov-target=." 
- - "--cov-threshold-percentage=0" - - knative/docs: - - name: post-knative-docs-go-coverage - branches: - - master - agent: kubernetes - decorate: true - clone_uri: "https://github.com/knative/docs.git" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/coverage:latest - imagePullPolicy: Always - command: - - "/coverage" - args: - - "--artifacts=$(ARTIFACTS)" - - "--profile-name=coverage_profile.txt" - - "--cov-target=." - - "--cov-threshold-percentage=0" - - knative/eventing: - - name: post-knative-eventing-go-coverage - branches: - - master - agent: kubernetes - decorate: true - clone_uri: "https://github.com/knative/eventing.git" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/coverage:latest - imagePullPolicy: Always - command: - - "/coverage" - args: - - "--artifacts=$(ARTIFACTS)" - - "--profile-name=coverage_profile.txt" - - "--cov-target=." - - "--cov-threshold-percentage=0" - - knative/eventing-sources: - - name: post-knative-eventing-sources-go-coverage - branches: - - master - agent: kubernetes - decorate: true - clone_uri: "https://github.com/knative/eventing-sources.git" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/coverage:latest - imagePullPolicy: Always - command: - - "/coverage" - args: - - "--artifacts=$(ARTIFACTS)" - - "--profile-name=coverage_profile.txt" - - "--cov-target=." - - "--cov-threshold-percentage=0" - - knative/pkg: - - name: post-knative-pkg-go-coverage - branches: - - master - agent: kubernetes - decorate: true - clone_uri: "https://github.com/knative/pkg.git" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/coverage:latest - imagePullPolicy: Always - command: - - "/coverage" - args: - - "--artifacts=$(ARTIFACTS)" - - "--profile-name=coverage_profile.txt" - - "--cov-target=." 
- - "--cov-threshold-percentage=0" - - knative/caching: - - name: post-knative-caching-go-coverage - branches: - - master - agent: kubernetes - decorate: true - clone_uri: "https://github.com/knative/caching.git" - spec: - containers: - - image: gcr.io/knative-tests/test-infra/coverage:latest - imagePullPolicy: Always - command: - - "/coverage" - args: - - "--artifacts=$(ARTIFACTS)" - - "--profile-name=coverage_profile.txt" - - "--cov-target=." - - "--cov-threshold-percentage=0" - diff --git a/vendor/github.com/knative/test-infra/ci/prow/config_start.yaml b/vendor/github.com/knative/test-infra/ci/prow/config_start.yaml deleted file mode 100644 index ada1a3e62fa..00000000000 --- a/vendor/github.com/knative/test-infra/ci/prow/config_start.yaml +++ /dev/null @@ -1,339 +0,0 @@ -# Copyright 2018 The Knative Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -# Initial configuration of prow cluster - -# Configs - -apiVersion: v1 -kind: ConfigMap -metadata: - name: plugins -data: - plugins: "" ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: config -data: - config: "" ---- - -# Namespaces - -apiVersion: v1 -kind: Namespace -metadata: - name: prow ---- -apiVersion: v1 -kind: Namespace -metadata: - name: test-pods ---- - -# Service accounts, roles and bindings - -kind: ServiceAccount -apiVersion: v1 -metadata: - name: "boskos" - namespace: test-pods ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: "boskos" - namespace: test-pods -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: boskos -subjects: -- kind: ServiceAccount - name: "boskos" - namespace: test-pods ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: "boskos" - namespace: test-pods -rules: - - apiGroups: - - apiextensions.k8s.io - resources: - - customresourcedefinitions - verbs: - - create - - apiGroups: - - boskos.k8s.io - resources: - - resources - verbs: - - "*" ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: "default" - namespace: test-pods ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: "test-pods-default" - namespace: test-pods -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: "test-pods-default" -subjects: -- kind: ServiceAccount - name: "default" - namespace: test-pods ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: "test-pods-default" - namespace: test-pods -rules: - - apiGroups: - - "" - resources: - - pods - verbs: - - get ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: "deck" ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: "deck" -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: "deck" -subjects: -- kind: ServiceAccount - name: "deck" - namespace: 
default ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: "deck" -rules: - - apiGroups: - - "" - resources: - - pods/log - verbs: - - get - - apiGroups: - - "prow.k8s.io" - resources: - - prowjobs - verbs: - - get - - list ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: "horologium" ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: "horologium" -rules: - - apiGroups: - - "prow.k8s.io" - resources: - - prowjobs - verbs: - - create - - list ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: "horologium" -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: "horologium" -subjects: -- kind: ServiceAccount - name: "horologium" - namespace: default ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: "plank" ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: "plank" -rules: - - apiGroups: - - "" - resources: - - pods - verbs: - - create - - delete - - list - - apiGroups: - - "prow.k8s.io" - resources: - - prowjobs - verbs: - - create - - list - - update ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: "plank" -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: "plank" -subjects: -- kind: ServiceAccount - name: "plank" - namespace: default ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: "sinker" ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: "sinker" -rules: - - apiGroups: - - "" - resources: - - pods - verbs: - - delete - - list - - apiGroups: - - "prow.k8s.io" - resources: - - prowjobs - verbs: - - delete - - list ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: "sinker" -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: "sinker" -subjects: -- kind: ServiceAccount - name: "sinker" - namespace: default ---- -apiVersion: v1 -kind: 
ServiceAccount -metadata: - name: "hook" ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: "hook" -rules: - - apiGroups: - - "prow.k8s.io" - resources: - - prowjobs - verbs: - - create - - get - - apiGroups: - - "" - resources: - - configmaps - verbs: - - update ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: "hook" -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: "hook" -subjects: -- kind: ServiceAccount - name: "hook" ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: "tide" ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: "tide" -rules: - - apiGroups: - - "prow.k8s.io" - resources: - - prowjobs - verbs: - - create - - get - - list - - apiGroups: - - "" - resources: - - configmaps - verbs: - - update ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: "tide" -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: "tide" -subjects: -- kind: ServiceAccount - name: "tide" - namespace: default diff --git a/vendor/github.com/knative/test-infra/ci/prow/plugins.yaml b/vendor/github.com/knative/test-infra/ci/prow/plugins.yaml deleted file mode 100644 index 57c8b2f4079..00000000000 --- a/vendor/github.com/knative/test-infra/ci/prow/plugins.yaml +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright 2018 The Knative Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -approve: -- repos: - - knative - implicit_self_approve: true - review_acts_as_approve: true - -plugins: - knative: - - approve - - assign - - blunderbuss - - buildifier - - cat - - dog - - golint - - heart - - help - - hold - - label - - lgtm - - lifecycle - - shrug - - size - - skip - - trigger - - wip - - yuks diff --git a/vendor/github.com/knative/test-infra/ci/prow/prow_setup.md b/vendor/github.com/knative/test-infra/ci/prow/prow_setup.md deleted file mode 100644 index 3f04729dfb5..00000000000 --- a/vendor/github.com/knative/test-infra/ci/prow/prow_setup.md +++ /dev/null @@ -1,71 +0,0 @@ -# Prow setup - -## Creating the cluster - -1. Create the GKE cluster, the role bindings and the GitHub secrets. You might need to update [Makefile](./Makefile). For details, see https://github.com/kubernetes/test-infra/blob/master/prow/getting_started.md. - -1. Ensure the GCP projects listed in [resources.yaml](./boskos/resources.yaml) are created. - -1. Apply [config_start.yaml](./config_start.yaml) to the cluster. - -1. Apply Boskos [config_start.yaml](./boskos/config_start.yaml) to the cluster. - -1. Run `make update-cluster`, `make update-boskos`, `make update-config`, `make update-plugins` and `make update-boskos-config`. - -1. If SSL needs to be reconfigured, promote your ingress IP to static in Cloud Console, and [create the TLS secret](https://kubernetes.io/docs/concepts/services-networking/ingress/#tls). - -## Expanding Boskos pool - -1. Create a new GCP project and add it to [resources.yaml](./boskos/resources.yaml). - -1. Make `knative-tests@appspot.gserviceaccount.com` an editor of the project. - -1. Enable the Compute Engine API for the project (e.g., by visiting https://console.developers.google.com/apis/api/compute.googleapis.com/overview?project=XXXXXXXX). - -1. Enable the Kubernetes Engine API for the project (e.g., by visiting https://console.cloud.google.com/apis/api/container.googleapis.com/overview?project=XXXXXXXX). - -1. 
Run `make update-boskos-config`. - -## Setting up Prow for a new repo - -1. Create the appropriate `OWNERS` files (at least one for the root dir). - -1. Make sure that *Knative Robots* is an Admin of the repo. - -1. Update the tide section in the Prow config file and run `make update-config` (ask one of the owners of knative/test-infra). - -1. Wait a few minutes, check that Prow is working by entering `/woof` as a comment in any PR in the new repo. - -1. Set **tide** as a required status check for the master branch. - -### Setting up jobs for a new repo - -1. Have the test infrastructure in place (usually this means having at least `//test/presubmit-tests.sh` working, and optionally `//hack/release.sh` working for automated nightly releases). - -1. Merge a pull request (e.g., https://github.com/knative/test-infra/pull/203) that: - - 1. Updates the Prow config file (usually, copy and update existing jobs from another repository). - - 1. For the presubmit tests, setup the *pull-knative-**repo**-**(build|unit|integration)**-tests* jobs. - - 1. For go test coverage, setup the ***(pull|post|ci)**-knative-**repo**-go-coverage* jobs. - - 1. For the continuous integration tests, setup the *ci-knative-**repo**-continuous* job. - - 1. For automated nightly releases, setup the *ci-knative-**repo**-release* job. - - 1. Updates the Gubernator config with the new log dirs. - - 1. Updates the Testgrid config with the new buckets, tabs and dashboard. - -1. Ask one of the owners of *knative/test-infra* to: - - 1. Run `make update-config` in `ci/prow`. - - 1. Run `make deploy` in `ci/gubernator`. - - 1. Run `make update-config` in `ci/testgrid`. - -1. Wait a few minutes, enter `/retest` as a comment in any PR in the repo and ensure the test jobs are executed. - -1. Set the new test jobs as required status checks for the master branch. 
diff --git a/vendor/github.com/knative/test-infra/ci/testgrid/Makefile b/vendor/github.com/knative/test-infra/ci/testgrid/Makefile deleted file mode 100644 index 5cf42d995b9..00000000000 --- a/vendor/github.com/knative/test-infra/ci/testgrid/Makefile +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright 2018 The Knative Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -TESTGRID_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST)))) - -test: - bazel run @k8s//testgrid/cmd/configurator -- \ - --yaml=$(TESTGRID_DIR)/config.yaml \ - --validate-config-file - -update-config: -ifndef GOOGLE_APPLICATION_CREDENTIALS - $(error GOOGLE_APPLICATION_CREDENTIALS not set) -endif - bazel run @k8s//testgrid/cmd/configurator -- \ - --yaml=$(TESTGRID_DIR)/config.yaml \ - --output=gs://knative-testgrid/config \ - --oneshot diff --git a/vendor/github.com/knative/test-infra/ci/testgrid/README.md b/vendor/github.com/knative/test-infra/ci/testgrid/README.md deleted file mode 100644 index 7b028e040f3..00000000000 --- a/vendor/github.com/knative/test-infra/ci/testgrid/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# Testgrid config - -This directory contains the config for our [Testgrid](https://github.com/kubernetes/test-infra/tree/master/testgrid) instance. - -* `Makefile` Commands to interact with the Testgrid instance regarding updates. -* `config.yaml` Testgrid configuration. 
diff --git a/vendor/github.com/knative/test-infra/ci/testgrid/config.yaml b/vendor/github.com/knative/test-infra/ci/testgrid/config.yaml deleted file mode 100644 index 60a5958eb09..00000000000 --- a/vendor/github.com/knative/test-infra/ci/testgrid/config.yaml +++ /dev/null @@ -1,213 +0,0 @@ -# Copyright 2018 The Knative Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Default testgroup and dashboardtab, please do not change them -default_test_group: - days_of_results: 14 # Number of days of test results to gather and serve - tests_name_policy: 2 # Replace the name of the test - ignore_pending: false # Show in-progress tests - column_header: - - configuration_value: Commit # Shows the commit number on column header - - configuration_value: infra-commit - num_columns_recent: 10 # The number of columns to consider "recent" for a variety of purposes - use_kubernetes_client: true # ** This field is deprecated and should always be true ** - is_external: true # ** This field is deprecated and should always be true ** - alert_stale_results_hours: 0 # Don't alert for staleness by default - num_failures_to_alert: 3 # Consider a test failed if it has 3 or more consecutive failures - num_passes_to_disable_alert: 1 # Consider a failing test passing if it has 1 or more consecutive passes - -default_dashboard_tab: - open_test_template: # The URL template to visit after clicking on a cell - url: https://gubernator.knative.dev/build// - file_bug_template: # The URL template 
to visit when filing a bug - url: https://github.com/knative/serving/issues/new - options: - - key: title - value: 'Test "" failed' - - key: body - value: - attach_bug_template: # The URL template to visit when attaching a bug - url: # Empty - options: # Empty - # Text to show in the about menu as a link to another view of the results - results_text: See these results in Gubernator - results_url_template: # The URL template to visit after clicking - url: https://gubernator.knative.dev/builds/ - # URL for regression search links. - code_search_path: github.com/knative/serving/search - num_columns_recent: 10 - code_search_url_template: # The URL template to visit when searching for changelists - url: https://github.com/knative/serving/compare/... - -# Test groups - -test_groups: -- name: ci-knative-serving-continuous - gcs_prefix: knative-prow/logs/ci-knative-serving-continuous -- name: ci-knative-serving-release - gcs_prefix: knative-prow/logs/ci-knative-serving-release -- name: ci-knative-serving-playground - gcs_prefix: knative-prow/logs/ci-knative-serving-playground -- name: pull-knative-serving-test-coverage - gcs_prefix: knative-prow/logs/ci-knative-serving-go-coverage - short_text_metric: coverage -- name: ci-knative-serving-latency - gcs_prefix: knative-prow/logs/ci-knative-serving-latency - short_text_metric: latency -- name: ci-knative-serving-api-coverage - gcs_prefix: knative-prow/logs/ci-knative-serving-api-coverage - short_text_metric: api_coverage -- name: ci-knative-build-continuous - gcs_prefix: knative-prow/logs/ci-knative-build-continuous -- name: ci-knative-build-release - gcs_prefix: knative-prow/logs/ci-knative-build-release -- name: pull-knative-build-test-coverage - gcs_prefix: knative-prow/logs/ci-knative-build-go-coverage - short_text_metric: coverage -- name: ci-knative-build-latency - gcs_prefix: knative-prow/logs/ci-knative-build-latency - short_text_metric: latency -- name: ci-knative-build-templates-continuous - gcs_prefix: 
knative-prow/logs/ci-knative-build-templates-continuous -- name: pull-knative-build-pipeline-test-coverage - gcs_prefix: knative-prow/logs/ci-knative-build-pipeline-go-coverage - short_text_metric: coverage -- name: ci-knative-eventing-continuous - gcs_prefix: knative-prow/logs/ci-knative-eventing-continuous -- name: ci-knative-eventing-release - gcs_prefix: knative-prow/logs/ci-knative-eventing-release -- name: pull-knative-eventing-test-coverage - gcs_prefix: knative-prow/logs/ci-knative-eventing-go-coverage - short_text_metric: coverage -- name: ci-knative-eventing-sources-continuous - gcs_prefix: knative-prow/logs/ci-knative-eventing-sources-continuous -- name: ci-knative-eventing-sources-release - gcs_prefix: knative-prow/logs/ci-knative-eventing-sources-release -- name: pull-knative-eventing-sources-test-coverage - gcs_prefix: knative-prow/logs/ci-knative-eventing-sources-go-coverage - short_text_metric: coverage -- name: ci-knative-docs-continuous - gcs_prefix: knative-prow/logs/ci-knative-docs-continuous -- name: pull-knative-docs-test-coverage - gcs_prefix: knative-prow/logs/ci-knative-docs-go-coverage - short_text_metric: coverage -- name: ci-knative-pkg-continuous - gcs_prefix: knative-prow/logs/ci-knative-pkg-continuous -- name: pull-knative-pkg-test-coverage - gcs_prefix: knative-prow/logs/ci-knative-pkg-go-coverage - short_text_metric: coverage -- name: ci-knative-caching-continuous - gcs_prefix: knative-prow/logs/ci-knative-caching-continuous -- name: pull-knative-caching-test-coverage - gcs_prefix: knative-prow/logs/ci-knative-caching-go-coverage - short_text_metric: coverage - -# Dashboards - -dashboards: -- name: knative-serving - dashboard_tab: - - name: continuous - test_group_name: ci-knative-serving-continuous - - name: release - test_group_name: ci-knative-serving-release - - name: playground - test_group_name: ci-knative-serving-playground - - name: coverage - test_group_name: pull-knative-serving-test-coverage - base_options: 
'exclude-filter-by-regex=Overall&group-by-directory=&expand-groups=&sort-by-name=' - - name: latency - test_group_name: ci-knative-serving-latency - description: '95% latency in ms' - base_options: 'exclude-filter-by-regex=Overall&group-by-directory=&expand-groups=&sort-by-name=' - - name: api-coverage - test_group_name: ci-knative-serving-api-coverage - description: 'Conformance tests API coverage.' - base_options: 'exclude-filter-by-regex=Overall$&group-by-directory=&expand-groups=&sort-by-name=' -- name: knative-build - dashboard_tab: - - name: continuous - test_group_name: ci-knative-build-continuous - - name: release - test_group_name: ci-knative-build-release - - name: coverage - test_group_name: pull-knative-build-test-coverage - base_options: 'exclude-filter-by-regex=Overall&group-by-directory=&expand-groups=&sort-by-name=' - - name: latency - test_group_name: ci-knative-build-latency - description: '95% latency in ms' - base_options: 'exclude-filter-by-regex=Overall&group-by-directory=&expand-groups=&sort-by-name=' -- name: knative-build-templates - dashboard_tab: - - name: continuous - test_group_name: ci-knative-build-templates-continuous -- name: knative-build-pipeline - dashboard_tab: - - name: coverage - test_group_name: pull-knative-build-pipeline-test-coverage - base_options: 'exclude-filter-by-regex=Overall&group-by-directory=&expand-groups=&sort-by-name=' -- name: knative-eventing - dashboard_tab: - - name: continuous - test_group_name: ci-knative-eventing-continuous - - name: release - test_group_name: ci-knative-eventing-release - - name: coverage - test_group_name: pull-knative-eventing-test-coverage - base_options: 'exclude-filter-by-regex=Overall&group-by-directory=&expand-groups=&sort-by-name=' -- name: knative-eventing-sources - dashboard_tab: - - name: continuous - test_group_name: ci-knative-eventing-sources-continuous - - name: release - test_group_name: ci-knative-eventing-sources-release - - name: coverage - test_group_name: 
pull-knative-eventing-sources-test-coverage - base_options: 'exclude-filter-by-regex=Overall&group-by-directory=&expand-groups=&sort-by-name=' -- name: knative-docs - dashboard_tab: - - name: continuous - test_group_name: ci-knative-docs-continuous - - name: coverage - test_group_name: pull-knative-docs-test-coverage - base_options: 'exclude-filter-by-regex=Overall&group-by-directory=&expand-groups=&sort-by-name=' -- name: knative-pkg - dashboard_tab: - - name: continuous - test_group_name: ci-knative-pkg-continuous - - name: coverage - test_group_name: pull-knative-pkg-test-coverage - base_options: 'exclude-filter-by-regex=Overall&group-by-directory=&expand-groups=&sort-by-name=' -- name: knative-caching - dashboard_tab: - - name: continuous - test_group_name: ci-knative-caching-continuous - - name: coverage - test_group_name: pull-knative-caching-test-coverage - base_options: 'exclude-filter-by-regex=Overall&group-by-directory=&expand-groups=&sort-by-name=' - -# Dashboard groups - -dashboard_groups: -- name: knative - dashboard_names: - - knative-serving - - knative-build - - knative-build-templates - - knative-build-pipeline - - knative-eventing - - knative-eventing-sources - - knative-docs - - knative-pkg - - knative-caching diff --git a/vendor/github.com/knative/test-infra/dummy.go b/vendor/github.com/knative/test-infra/dummy.go deleted file mode 100644 index 94e6159ba78..00000000000 --- a/vendor/github.com/knative/test-infra/dummy.go +++ /dev/null @@ -1,10 +0,0 @@ -package main - -import ( - "fmt" -) - -func main() { - fmt.Println("This is a dummy go file so `go dep` can be used with knative/test-infra repo") - fmt.Println("This file can be removed once the repo contains real, useful go code in the root dir") -} diff --git a/vendor/github.com/knative/test-infra/images/README.md b/vendor/github.com/knative/test-infra/images/README.md deleted file mode 100644 index 22b9b16edd0..00000000000 --- a/vendor/github.com/knative/test-infra/images/README.md +++ 
/dev/null @@ -1,3 +0,0 @@ -# Prow Job Images - -This directory contains custom Docker images used by our Prow jobs. diff --git a/vendor/github.com/knative/test-infra/images/apicoverage/Dockerfile b/vendor/github.com/knative/test-infra/images/apicoverage/Dockerfile deleted file mode 100644 index 897ec7d82ef..00000000000 --- a/vendor/github.com/knative/test-infra/images/apicoverage/Dockerfile +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright 2018 The Knative Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -FROM golang:1.10.2 -LABEL maintainer "Srinivas Hegde " -RUN apt-get update && apt-get install -y --no-install-recommends - -COPY apicoverage /apicoverage -ENTRYPOINT ["/apicoverage"] diff --git a/vendor/github.com/knative/test-infra/images/apicoverage/Makefile b/vendor/github.com/knative/test-infra/images/apicoverage/Makefile deleted file mode 100644 index b5a87ca546c..00000000000 --- a/vendor/github.com/knative/test-infra/images/apicoverage/Makefile +++ /dev/null @@ -1,23 +0,0 @@ - -# Copyright 2018 The Knative Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -REGISTRY ?= gcr.io -PROJECT ?= knative-tests/test-infra -PUSH ?= docker push - -apicoverage-image: - CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build ../../tools/apicoverage - docker build -t "$(REGISTRY)/$(PROJECT)/apicoverage:latest" . - $(PUSH) "$(REGISTRY)/$(PROJECT)/apicoverage:latest" diff --git a/vendor/github.com/knative/test-infra/images/apicoverage/README.md b/vendor/github.com/knative/test-infra/images/apicoverage/README.md deleted file mode 100644 index b855777358b..00000000000 --- a/vendor/github.com/knative/test-infra/images/apicoverage/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# API coverage tool Image - -This directory contains the custom docker image used for calculating the API coverage by the conformance tests. diff --git a/vendor/github.com/knative/test-infra/images/prow-tests/Dockerfile b/vendor/github.com/knative/test-infra/images/prow-tests/Dockerfile deleted file mode 100644 index 7baf483481e..00000000000 --- a/vendor/github.com/knative/test-infra/images/prow-tests/Dockerfile +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright 2018 The Knative Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -FROM gcr.io/k8s-testimages/kubekins-e2e:v20181001-df2f5324a-master -LABEL maintainer "Adriano Cunha " - -# Install extras on top of base image - -ENV DEBIAN_FRONTEND noninteractive -RUN apt-get update -RUN gcloud components update - -# Docker -RUN gcloud components install docker-credential-gcr -RUN docker-credential-gcr configure-docker - -# Extra tools through apt-get -RUN apt-get install -y uuid-runtime # for uuidgen -RUN apt-get install -y npm # for markdown-link-check -RUN apt-get install -y rubygems # for mdl -RUN apt-get install -y build-essential libssl-dev # for wrk -RUN apt-get install -y netbase # sets up /etc/services needed for wrk - -# Extra tools through go get -RUN go get -u github.com/google/go-containerregistry/cmd/ko -RUN go get -u github.com/golang/dep/cmd/dep -RUN go get -u github.com/google/licenseclassifier - -# Extra tools through npm -RUN npm install -g markdown-link-check - -# Extra tools through gem -RUN gem install mixlib-config -v 2.2.4 # required because ruby is 2.1 -RUN gem install mdl - -# Install wrk -RUN git clone https://github.com/wg/wrk.git wrk -RUN make -C wrk/ -RUN cp wrk/wrk /usr/local/bin - -ADD . /go/src/github.com/knative/test-infra - -# Extra custom tools -RUN cp /go/src/github.com/knative/test-infra/tools/githubhelper/githubhelper . -RUN go install github.com/knative/test-infra/tools/dep-collector diff --git a/vendor/github.com/knative/test-infra/images/prow-tests/Makefile b/vendor/github.com/knative/test-infra/images/prow-tests/Makefile deleted file mode 100644 index 6e1ce3c08ca..00000000000 --- a/vendor/github.com/knative/test-infra/images/prow-tests/Makefile +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright 2018 The Knative Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -IMG = gcr.io/knative-tests/test-infra/prow-tests -TAG := $(shell date +v%Y%m%d)-$(shell git describe --tags --always --dirty) - -all: build - -build: - make -C ../../tools/githubhelper - docker build -t $(IMG):$(TAG) -f Dockerfile ../.. - docker tag $(IMG):$(TAG) $(IMG):latest - -push_versioned: build - docker push $(IMG):$(TAG) - -push_latest: build - docker push $(IMG):latest - -clean: - rm -fr githubhelper dep-collector - -push: push_versioned push_latest clean diff --git a/vendor/github.com/knative/test-infra/images/prow-tests/README.md b/vendor/github.com/knative/test-infra/images/prow-tests/README.md deleted file mode 100644 index d1b904427e4..00000000000 --- a/vendor/github.com/knative/test-infra/images/prow-tests/README.md +++ /dev/null @@ -1,13 +0,0 @@ -# Prow Test Job Image - -This directory contains the custom Docker image used by our Prow test jobs. - -## Building and publishing a new image - -To build and push a new image, just run `make push`. - -For testing purposes you can build an image but not push it; to do so, run `make build`. - -Note that you must have proper permission in the `knative-tests` project to push new images to the GCR. - -The `prow-tests` image is pinned on a specific `kubekins` image; update `Dockerfile` if you need to use a newer/different image. This will basically define the versions of `bazel`, `go`, `kubectl` and other build tools. 
diff --git a/vendor/github.com/knative/test-infra/test/e2e-tests.sh b/vendor/github.com/knative/test-infra/test/e2e-tests.sh deleted file mode 100755 index 128733ce38d..00000000000 --- a/vendor/github.com/knative/test-infra/test/e2e-tests.sh +++ /dev/null @@ -1,50 +0,0 @@ -#!/bin/bash - -# Copyright 2018 The Knative Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# This script runs the end-to-end tests. - -# If you already have a Knative cluster setup and kubectl pointing -# to it, call this script with the --run-tests arguments and it will use -# the cluster and run the tests. - -# Calling this script without arguments will create a new cluster in -# project $PROJECT_ID, run the tests and delete the cluster. - -source $(dirname $0)/../scripts/e2e-tests.sh - -function parse_flags() { - if [[ "$1" == "--smoke-test-custom-flag" ]]; then - echo "--smoke-test-custom-flag passed" - exit 0 - fi - return 0 -} - -# Script entry point. - -initialize $@ - -if (( USING_EXISTING_CLUSTER )); then - echo "ERROR: this test isn't intended to run against an existing cluster" - fail_test -fi - -start_latest_knative_serving || fail_test "Knative Serving is not up" - -# This is actually a unit test, but it does exercise the necessary helper functions. 
-go_test_e2e -run TestE2ESucceeds ./test || fail_test - -success diff --git a/vendor/github.com/knative/test-infra/test/presubmit-tests.sh b/vendor/github.com/knative/test-infra/test/presubmit-tests.sh deleted file mode 100755 index f3f1c71080e..00000000000 --- a/vendor/github.com/knative/test-infra/test/presubmit-tests.sh +++ /dev/null @@ -1,49 +0,0 @@ -#!/bin/bash - -# Copyright 2018 The Knative Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# This script runs the presubmit tests; it is started by prow for each PR. -# For convenience, it can also be executed manually. -# Running the script without parameters, or with the --all-tests -# flag, causes all tests to be executed, in the right order. -# Use the flags --build-tests, --unit-tests and --integration-tests -# to run a specific set of tests. - -source $(dirname $0)/../scripts/presubmit-tests.sh - -function build_tests() { - header "Running build tests" - local failed=0 - make -C ci/prow test || failed=1 - make -C ci/testgrid test || failed=1 - for script in scripts/*.sh; do - echo "Checking integrity of ${script}" - bash -c "source ${script}" || failed=1 - done - return ${failed} -} - -function unit_tests() { - header "Running unit tests" - local failed=0 - for test in ./test/unit/*-tests.sh; do - ${test} || failed=1 - done - return ${failed} -} - -# We use the default integration test runner. 
- -main $@ diff --git a/vendor/github.com/knative/test-infra/test/unit/e2e-custom-flag-tests.sh b/vendor/github.com/knative/test-infra/test/unit/e2e-custom-flag-tests.sh deleted file mode 100755 index b5528861752..00000000000 --- a/vendor/github.com/knative/test-infra/test/unit/e2e-custom-flag-tests.sh +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/bash - -# Copyright 2018 The Knative Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# This script runs the end-to-end tests. - -# If you already have a Knative cluster setup and kubectl pointing -# to it, call this script with the --run-tests arguments and it will use -# the cluster and run the tests. - -# Calling this script without arguments will create a new cluster in -# project $PROJECT_ID, run the tests and delete the cluster. - -source $(dirname $0)/../../scripts/e2e-tests.sh - -function parse_flags() { - if [[ "$1" == "--smoke-test-custom-flag" ]]; then - echo "OK: --smoke-test-custom-flag passed" - exit 0 - fi - fail_test "Unexpected flag $1 passed" -} - -# Script entry point. 
- -initialize --smoke-test-custom-flag diff --git a/vendor/github.com/knative/test-infra/test/unit/library-tests.sh b/vendor/github.com/knative/test-infra/test/unit/library-tests.sh deleted file mode 100755 index 13bf1cee725..00000000000 --- a/vendor/github.com/knative/test-infra/test/unit/library-tests.sh +++ /dev/null @@ -1,50 +0,0 @@ -#!/bin/bash - -# Copyright 2018 The Knative Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Fake we're in a Prow job, if running locally. -[[ -z "${PROW_JOB_ID:-}" ]] && PROW_JOB_ID=123 - -source $(dirname $0)/../../scripts/library.sh - -set -e - -function test_report() { - local REPORT="$(mktemp)" - report_go_test -run $1 ./test > ${REPORT} || true - cat ${REPORT} - grep "$2" ${REPORT} > /dev/null - grep "Done parsing 1 tests" ${REPORT} > /dev/null -} - -# Cleanup bazel stuff to avoid confusing Prow -function cleanup_bazel() { - bazel clean -} - -trap cleanup_bazel EXIT - -header "Testing report_go_test" - -subheader "Test pass" -test_report TestSucceeds "^- TestSucceeds :PASS:" - -subheader "Test fails with fatal" -test_report TestFailsWithFatal "^- TestFailsWithFatal :FAIL:" - -subheader "Test fails with SIGQUIT" -test_report TestFailsWithSigQuit "^- TestFailsWithSigQuit :FAIL:" - -header "All tests passed" diff --git a/vendor/github.com/knative/test-infra/test/unit/presubmit-full-custom-integration-tests.sh b/vendor/github.com/knative/test-infra/test/unit/presubmit-full-custom-integration-tests.sh deleted file mode 
100755 index d22b66e32dc..00000000000 --- a/vendor/github.com/knative/test-infra/test/unit/presubmit-full-custom-integration-tests.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/bin/bash - -# Copyright 2018 The Knative Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -source $(dirname $0)/presubmit-integration-tests-common.sh - -function check_results() { - (( PRE_INTEGRATION_TESTS )) || failed "Pre integration tests did not run" - (( CUSTOM_INTEGRATION_TESTS )) || failed "Custom integration tests did not run" - (( POST_INTEGRATION_TESTS )) || failed "Post integration tests did not run" - echo "Test passed" -} - -echo "Testing all custom test integration functions" - -main $@ diff --git a/vendor/github.com/knative/test-infra/test/unit/presubmit-integration-tests-common.sh b/vendor/github.com/knative/test-infra/test/unit/presubmit-integration-tests-common.sh deleted file mode 100755 index 78c0f4d0646..00000000000 --- a/vendor/github.com/knative/test-infra/test/unit/presubmit-integration-tests-common.sh +++ /dev/null @@ -1,48 +0,0 @@ -#!/bin/bash - -# Copyright 2018 The Knative Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -source $(dirname $0)/../../scripts/presubmit-tests.sh - -function failed() { - echo $1 - exit 1 -} - -function pre_integration_tests() { - PRE_INTEGRATION_TESTS=1 -} - -function integration_tests() { - CUSTOM_INTEGRATION_TESTS=1 -} - -function post_integration_tests() { - POST_INTEGRATION_TESTS=1 -} - -function build_tests() { - return 0 -} - -function unit_tests() { - return 0 -} - -PRE_INTEGRATION_TESTS=0 -CUSTOM_INTEGRATION_TESTS=0 -POST_INTEGRATION_TESTS=0 - -trap check_results EXIT diff --git a/vendor/github.com/knative/test-infra/test/unit/presubmit-partial-custom-integration-tests.sh b/vendor/github.com/knative/test-infra/test/unit/presubmit-partial-custom-integration-tests.sh deleted file mode 100755 index e0fb4ef24fa..00000000000 --- a/vendor/github.com/knative/test-infra/test/unit/presubmit-partial-custom-integration-tests.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash - -# Copyright 2018 The Knative Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Test that pre/post integration tests don't run if unset. 
- -source $(dirname $0)/presubmit-integration-tests-common.sh - -function check_results() { - (( ! PRE_INTEGRATION_TESTS )) || failed "Pre integration tests did run" - (( CUSTOM_INTEGRATION_TESTS )) || failed "Custom integration tests did not run" - (( ! POST_INTEGRATION_TESTS )) || failed "Post integration tests did run" - echo "Test passed" -} - -echo "Testing custom test integration function" - -unset -f pre_integration_tests -unset -f post_integration_tests - -main $@ diff --git a/vendor/github.com/knative/test-infra/test/unit/release-tests.sh b/vendor/github.com/knative/test-infra/test/unit/release-tests.sh deleted file mode 100755 index 4b226df1bb6..00000000000 --- a/vendor/github.com/knative/test-infra/test/unit/release-tests.sh +++ /dev/null @@ -1,73 +0,0 @@ -#!/bin/bash - -# Copyright 2018 The Knative Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -source $(dirname $0)/../../scripts/release.sh - -set -e - -# Call a function and verify its return value and output. -# Parameters: $1 - expected return code. -# $2 - expected output ("" if no output is expected) -# $3 ..$n - function to call and its parameters. -function test_function() { - local expected_retcode=$1 - local expected_string=$2 - local output="$(mktemp)" - local output_code="$(mktemp)" - shift 2 - echo -n "$(trap '{ echo $? 
> ${output_code}; }' EXIT ; $@)" > ${output} - local retcode=$(cat ${output_code}) - if [[ ${retcode} -ne ${expected_retcode} ]]; then - cat ${output} - echo "Return code ${retcode} doesn't match expected return code ${expected_retcode}" - return 1 - fi - if [[ -n "${expected_string}" ]]; then - local found=1 - grep "${expected_string}" ${output} > /dev/null || found=0 - if (( ! found )); then - cat ${output} - echo "String '${expected_string}' not found" - return 1 - fi - else - if [[ -s ${output} ]]; then - ls ${output} - cat ${output} - echo "Unexpected output" - return 1 - fi - fi - echo "'$@' returns code ${expected_retcode} and displays '${expected_string}'" -} - -header "Testing initialization" - -test_function 1 "error: missing version" initialize --version -test_function 1 "error: version format" initialize --version a -test_function 1 "error: version format" initialize --version 0.0 -test_function 0 "" initialize --version 1.0.0 - -test_function 1 "error: missing branch" initialize --branch -test_function 1 "error: branch name must be" initialize --branch a -test_function 1 "error: branch name must be" initialize --branch 0.0 -test_function 0 "" initialize --branch release-0.0 - -test_function 1 "error: missing release notes" initialize --release-notes -test_function 1 "error: file a doesn't" initialize --release-notes a -test_function 0 "" initialize --release-notes $(mktemp) - -header "All tests passed" diff --git a/vendor/github.com/knative/test-infra/tools/README.md b/vendor/github.com/knative/test-infra/tools/README.md deleted file mode 100644 index d4cf2a272f2..00000000000 --- a/vendor/github.com/knative/test-infra/tools/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Test Infrastructure tools - -This directory contains tools used by our Prow jobs. 
diff --git a/vendor/github.com/knative/test-infra/tools/apicoverage/README.md b/vendor/github.com/knative/test-infra/tools/apicoverage/README.md deleted file mode 100644 index 01ddf855151..00000000000 --- a/vendor/github.com/knative/test-infra/tools/apicoverage/README.md +++ /dev/null @@ -1,14 +0,0 @@ -# API Coverage Tool -This tool is designed to show the field level coverage exercised by the conformance tests. - -## Read from GCS -This tool reads the logs from the latest continous build of knative/serving. The logs have the information of which CRD objects are being created and which fields are being set for the testing. -It uses the service account passed in or by default will use the GOOGLE_APPLICATION_CREDENTIALS variable to get the logs. - -## Creating Output -This tool creates an output xml in the prow artifacts directory. The prow artifacts directory is passed in or by default will use `./artifacts` directory. - -This output xml will be read by testgrid and displayed on the [dashboard](https://testgrid.knative.dev/knative-serving#api-coverage). - -## Prow Job -There is a daily prow job that triggers this tool that is run at 01:05 AM PST. This tool will then generate the output xml which is then displayed in the testgrid dashboard. diff --git a/vendor/github.com/knative/test-infra/tools/apicoverage/apicoverage.go b/vendor/github.com/knative/test-infra/tools/apicoverage/apicoverage.go deleted file mode 100644 index 6e007a7270b..00000000000 --- a/vendor/github.com/knative/test-infra/tools/apicoverage/apicoverage.go +++ /dev/null @@ -1,241 +0,0 @@ -/* -Copyright 2018 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// apicoverage.go parses the log file and outputs the api coverage numbers in a -// testgrid expected output xml file - -package main - -import ( - "context" - "encoding/json" - "flag" - "fmt" - "log" - "os" - "reflect" - "strings" - - "github.com/knative/serving/pkg/apis/serving/v1alpha1" - "github.com/knative/test-infra/tools/gcs" - "github.com/knative/test-infra/tools/testgrid" -) - -const ( - logDir = "logs/ci-knative-serving-continuous/" - buildFile = "build-log.txt" - apiCoverage = "api_coverage" - overallRoute = "OverallRoute" - overallConfig = "OverallConfiguration" - overallService = "OverallService" -) - -// ResourceObjects defines the resource objects in knative-serving -type ResourceObjects struct { - Route *v1alpha1.Route - Configuration *v1alpha1.Configuration - Service *v1alpha1.Service -} - -// OverallAPICoverage defines the overall api coverage for knative serving -type OverallAPICoverage struct { - RouteAPICovered map[string]int - RouteAPINotCovered map[string]int - ConfigurationAPICovered map[string]int - ConfigurationAPINotCovered map[string]int - ServiceAPICovered map[string]int - ServiceAPINotCovered map[string]int -} - -type apiObjectName string - -const ( - apiObjectRoute apiObjectName = "route" - apiObjectConfiguration = "configuration" - apiObjectService = "service" -) - -// check if the object value is nil or empty. 
-// Uses https://golang.org/pkg/reflect/#Kind to get the variable type -func isNil(v reflect.Value) bool { - switch v.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - } - return false -} - -func isStruct(v reflect.Value) bool { - return v.Kind() == reflect.Struct -} - -// Parse the struct and returns a map of -func parseStruct(v reflect.Value) map[string]reflect.Value { - f := make(map[string]reflect.Value) - - for i := 0; i < v.NumField(); i++ { - // Include only public vars. https://golang.org/pkg/reflect/#StructField. - if len(v.Type().Field(i).PkgPath) == 0 { - f[v.Type().Field(i).Name] = v.Field(i) - } - } - - return f -} - -func incrementCoverageValues(name string, covered map[string]int) { - if i, ok := covered[name]; ok { - covered[name] = i + 1 - } else { - covered[name] = 1 - } -} - -func handleCovered(name string, coverage *OverallAPICoverage) { - if strings.HasPrefix(name, "route") { - incrementCoverageValues(name, coverage.RouteAPICovered) - } else if strings.HasPrefix(name, "configuration") { - incrementCoverageValues(name, coverage.ConfigurationAPICovered) - } else if strings.HasPrefix(name, "service") { - incrementCoverageValues(name, coverage.ServiceAPICovered) - } -} - -func handleNotCovered(name string, coverage *OverallAPICoverage) { - if strings.HasPrefix(name, "route") { - coverage.RouteAPINotCovered[name] = 0 - } else if strings.HasPrefix(name, "configuration") { - coverage.ConfigurationAPINotCovered[name] = 0 - } else if strings.HasPrefix(name, "service") { - coverage.ServiceAPINotCovered[name] = 0 - } -} - -func getCoverage(value reflect.Value, name string, coverage *OverallAPICoverage) { - // Parse all the fields in the struct - for key, v := range parseStruct(value) { - name := name + "." 
+ key - if isStruct(v) { - getCoverage(v, name, coverage) - } else { - // check if it is empty/nil - if isNil(v) { - handleNotCovered(name, coverage) - } else { - handleCovered(name, coverage) - } - } - } -} - -func calculateCoverage(covLogs []string, coverage *OverallAPICoverage) { - if len(covLogs) == 0 { - return - } - - for _, f := range covLogs { - var obj ResourceObjects - if err := json.Unmarshal([]byte(f), &obj); err != nil { - log.Fatalf("Cannot read resource object: %v", err) - } else { - if obj.Route != nil { - getCoverage(reflect.ValueOf(obj.Route).Elem(), "route", coverage) - } else if obj.Configuration != nil { - getCoverage(reflect.ValueOf(obj.Configuration).Elem(), "configuration", coverage) - } else if obj.Service != nil { - getCoverage(reflect.ValueOf(obj.Service).Elem(), "service", coverage) - } - } - } -} - -func initCoverage() *OverallAPICoverage { - coverage := OverallAPICoverage{} - coverage.RouteAPICovered = make(map[string]int) - coverage.RouteAPINotCovered = make(map[string]int) - coverage.ConfigurationAPICovered = make(map[string]int) - coverage.ConfigurationAPINotCovered = make(map[string]int) - coverage.ServiceAPICovered = make(map[string]int) - coverage.ServiceAPINotCovered = make(map[string]int) - - return &coverage -} - -func getRelevantLogs(fields []string) *string { - // I0727 16:23:30.055] 2018-10-12T18:18:06.835-0700 info TestRouteCreation test/configuration.go:34 resource {: }"} - if len(fields) == 8 && fields[3] == "info" && fields[6] == "resource" { - s := strings.Join(fields[7:], " ") - return &s - } - return nil -} - -func createCases(tcName string, covered map[string]int, notCovered map[string]int) []testgrid.TestCase { - var tc []testgrid.TestCase - - var percentCovered = float32(100 * len(covered) / (len(covered) + len(notCovered))) - tp := []testgrid.TestProperty{testgrid.TestProperty{Name: apiCoverage, Value: percentCovered}} - tc = append(tc, testgrid.TestCase{Name: tcName, Properties: testgrid.TestProperties{Property: 
tp}, Fail: false}) - - for key, value := range covered { - tp := []testgrid.TestProperty{testgrid.TestProperty{Name: apiCoverage, Value: float32(value)}} - tc = append(tc, testgrid.TestCase{Name: tcName + "/" + key, Properties: testgrid.TestProperties{Property: tp}, Fail: false}) - } - - for key, value := range notCovered { - tp := []testgrid.TestProperty{testgrid.TestProperty{Name: apiCoverage, Value: float32(value)}} - tc = append(tc, testgrid.TestCase{Name: tcName + "/" + key, Properties: testgrid.TestProperties{Property: tp}, Fail: true}) - } - return tc -} - -func createTestgridXML(coverage *OverallAPICoverage, artifactsDir string) { - tc := createCases(overallRoute, coverage.RouteAPICovered, coverage.RouteAPINotCovered) - tc = append(tc, createCases(overallConfig, coverage.ConfigurationAPICovered, coverage.ConfigurationAPINotCovered)...) - tc = append(tc, createCases(overallService, coverage.ServiceAPICovered, coverage.ServiceAPINotCovered)...) - ts := testgrid.TestSuite{TestCases: tc} - - if err := testgrid.CreateXMLOutput(ts, artifactsDir); err != nil { - log.Fatalf("Cannot create the xml output file: %v", err) - } -} - -func main() { - - artifactsDir := flag.String("artifacts-dir", "./artifacts", "Directory to store the generated XML file") - serviceAccount := flag.String("service-account", os.Getenv("GOOGLE_APPLICATION_CREDENTIALS"), "JSON key file for service account to use") - flag.Parse() - - // Read the latest-build.txt file to get the latest build number - ctx := context.Background() - num, err := gcs.GetLatestBuildNumber(ctx, logDir, *serviceAccount) - if err != nil { - log.Fatalf("Cannot get latest build number: %v", err) - } - - // Calculate coverage - coverage := initCoverage() - calculateCoverage( - gcs.ParseLog(ctx, fmt.Sprintf("%s/%d/%s", logDir, num, buildFile), getRelevantLogs), - coverage) - - // Write the testgrid xml to artifacts - createTestgridXML(coverage, *artifactsDir) -} diff --git 
a/vendor/github.com/knative/test-infra/tools/dep-collector/README.md b/vendor/github.com/knative/test-infra/tools/dep-collector/README.md deleted file mode 100644 index 9acf6cef809..00000000000 --- a/vendor/github.com/knative/test-infra/tools/dep-collector/README.md +++ /dev/null @@ -1,88 +0,0 @@ -# dep-collector - -`dep-collector` is a tool for gathering up a collection of licenses for Go -dependencies that have been pulled into the idiomatic `vendor/` directory. -The resulting file from running `dep-collector` is intended for inclusion -in container images to respect the licenses of the included software. - -### Basic Usage - -You can run `dep-collector` on one or more Go import paths as entrypoints, -and it will: -1. Walk the transitive dependencies to identify vendored software packages, -1. Search for licenses for each vendored dependency, -1. Dump a file containing the licenses for each vendored import. - -For example (single import path): -```shell -$ dep-collector . -=========================================================== -Import: github.com/mattmoor/dep-collector/vendor/github.com/google/licenseclassifier - - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ -... - -``` - -For example (multiple import paths): - -```shell -$ dep-collector ./cmd/controller ./cmd/sleeper - -=========================================================== -Import: github.com/mattmoor/warm-image/vendor/cloud.google.com/go - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ -``` - -### CSV Usage - -You can also run `dep-collector` in a mode that produces CSV output, -including basic classification of the license. - -> In order to run dep-collector in this mode, you must first run: -> go get github.com/google/licenseclassifier - -For example: - -```shell -$ dep-collector -csv . 
-github.com/google/licenseclassifier,Static,,https://github.com/mattmoor/dep-collector/blob/master/vendor/github.com/google/licenseclassifier/LICENSE,Apache-2.0 -github.com/google/licenseclassifier/stringclassifier,Static,,https://github.com/mattmoor/dep-collector/blob/master/vendor/github.com/google/licenseclassifier/stringclassifier/LICENSE,Apache-2.0 -github.com/sergi/go-diff,Static,,https://github.com/mattmoor/dep-collector/blob/master/vendor/github.com/sergi/go-diff/LICENSE,MIT - -``` - -The columns here are: -* Import Path, -* How the dependency is linked in (always reports "static"), -* A column for whether any modifications have been made (always empty), -* The URL by which to access the license file (assumes `master`), -* A classification of what license this is ([using this](https://github.com/google/licenseclassifier)). - - -### Check mode - -`dep-collector` also includes a mode that will check for "forbidden" licenses. - -> In order to run dep-collector in this mode, you must first run: -> go get github.com/google/licenseclassifier - -For example (failing): -```shell -$ dep-collector -check ./foo/bar/baz -2018/07/20 22:01:29 Error checking license collection: Errors validating licenses: -Found matching forbidden license in "foo.io/bar/vendor/github.com/BurntSushi/toml": WTFPL -``` - -For example (passing): - -```shell -$ dep-collector -check . -2018/07/20 22:29:09 No errors found. -``` diff --git a/vendor/github.com/knative/test-infra/tools/dep-collector/imports.go b/vendor/github.com/knative/test-infra/tools/dep-collector/imports.go deleted file mode 100644 index 924ce410228..00000000000 --- a/vendor/github.com/knative/test-infra/tools/dep-collector/imports.go +++ /dev/null @@ -1,94 +0,0 @@ -/* -Copyright 2018 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package main - -import ( - "fmt" - gb "go/build" - "path/filepath" - "sort" - "strings" -) - -func CollectTransitiveImports(binaries []string) ([]string, error) { - // Perform a simple DFS to collect the binaries' transitive dependencies. - visited := make(map[string]struct{}) - for _, importpath := range binaries { - if gb.IsLocalImport(importpath) { - ip, err := qualifyLocalImport(importpath) - if err != nil { - return nil, err - } - importpath = ip - } - - pkg, err := gb.Import(importpath, WorkingDir, gb.ImportComment) - if err != nil { - return nil, err - } - if err := visit(pkg, visited); err != nil { - return nil, err - } - } - - // Sort the dependencies deterministically. 
- var list sort.StringSlice - for ip := range visited { - if !strings.Contains(ip, "/vendor/") { - // Skip files outside of vendor - continue - } - list = append(list, ip) - } - list.Sort() - - return list, nil -} - -func qualifyLocalImport(ip string) (string, error) { - gopathsrc := filepath.Join(gb.Default.GOPATH, "src") - if !strings.HasPrefix(WorkingDir, gopathsrc) { - return "", fmt.Errorf("working directory must be on ${GOPATH}/src = ", gopathsrc) - } - return filepath.Join(strings.TrimPrefix(WorkingDir, gopathsrc+string(filepath.Separator)), ip), nil -} - -func visit(pkg *gb.Package, visited map[string]struct{}) error { - if _, ok := visited[pkg.ImportPath]; ok { - return nil - } - visited[pkg.ImportPath] = struct{}{} - - for _, ip := range pkg.Imports { - if ip == "C" { - // skip cgo - continue - } - subpkg, err := gb.Import(ip, WorkingDir, gb.ImportComment) - if err != nil { - return fmt.Errorf("%v\n -> %v", pkg.ImportPath, err) - } - if !strings.HasPrefix(subpkg.Dir, WorkingDir) { - // Skip import paths outside of our workspace (std library) - continue - } - if err := visit(subpkg, visited); err != nil { - return fmt.Errorf("%v (%v)\n -> %v", pkg.ImportPath, pkg.Dir, err) - } - } - return nil -} diff --git a/vendor/github.com/knative/test-infra/tools/dep-collector/licenses.go b/vendor/github.com/knative/test-infra/tools/dep-collector/licenses.go deleted file mode 100644 index cb1df9ab748..00000000000 --- a/vendor/github.com/knative/test-infra/tools/dep-collector/licenses.go +++ /dev/null @@ -1,203 +0,0 @@ -/* -Copyright 2018 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package main - -import ( - "fmt" - gb "go/build" - "io/ioutil" - "os" - "path/filepath" - "sort" - "strings" - - "github.com/google/licenseclassifier" -) - -var LicenseNames = []string{ - "LICENCE", - "LICENSE", - "LICENSE.code", - "LICENSE.md", - "LICENSE.txt", - "COPYING", - "copyright", -} - -const MatchThreshold = 0.9 - -type LicenseFile struct { - EnclosingImportPath string - LicensePath string -} - -func (lf *LicenseFile) Body() (string, error) { - body, err := ioutil.ReadFile(lf.LicensePath) - if err != nil { - return "", err - } - return string(body), nil -} - -func (lt *LicenseFile) Classify(classifier *licenseclassifier.License) (string, error) { - body, err := lt.Body() - if err != nil { - return "", err - } - m := classifier.NearestMatch(body) - if m == nil { - return "", fmt.Errorf("unable to classify license: %v", lt.EnclosingImportPath) - } - return m.Name, nil -} - -func (lt *LicenseFile) Check(classifier *licenseclassifier.License) error { - body, err := lt.Body() - if err != nil { - return err - } - ms := classifier.MultipleMatch(body, false) - for _, m := range ms { - return fmt.Errorf("Found matching forbidden license in %q: %v", lt.EnclosingImportPath, m.Name) - } - return nil -} - -func (lt *LicenseFile) Entry() (string, error) { - body, err := lt.Body() - if err != nil { - return "", err - } - return fmt.Sprintf(` -=========================================================== -Import: %s - -%s -`, lt.EnclosingImportPath, body), nil -} - -func (lt *LicenseFile) CSVRow(classifier *licenseclassifier.License) (string, error) { - classification, err 
:= lt.Classify(classifier) - if err != nil { - return "", err - } - parts := strings.Split(lt.EnclosingImportPath, "/vendor/") - if len(parts) != 2 { - return "", fmt.Errorf("wrong number of parts splitting import path on %q : %q", "/vendor/", lt.EnclosingImportPath) - } - return strings.Join([]string{ - parts[1], - "Static", - "", // TODO(mattmoor): Modifications? - "https://" + parts[0] + "/blob/master/vendor/" + parts[1] + "/" + filepath.Base(lt.LicensePath), - classification, - }, ","), nil -} - -func findLicense(ip string) (*LicenseFile, error) { - pkg, err := gb.Import(ip, WorkingDir, gb.ImportComment) - if err != nil { - return nil, err - } - dir := pkg.Dir - - for { - // When we reach the root of our workspace, stop searching. - if dir == WorkingDir { - return nil, fmt.Errorf("unable to find license for %q", pkg.ImportPath) - } - - for _, name := range LicenseNames { - p := filepath.Join(dir, name) - if _, err := os.Stat(p); err != nil { - continue - } - - return &LicenseFile{ - EnclosingImportPath: ip, - LicensePath: p, - }, nil - } - - // Walk to the parent directory / import path - dir = filepath.Dir(dir) - ip = filepath.Dir(ip) - } -} - -type LicenseCollection []*LicenseFile - -func (lc LicenseCollection) Entries() (string, error) { - sections := make([]string, 0, len(lc)) - for _, key := range lc { - entry, err := key.Entry() - if err != nil { - return "", err - } - sections = append(sections, entry) - } - return strings.Join(sections, "\n"), nil -} - -func (lc LicenseCollection) CSV(classifier *licenseclassifier.License) (string, error) { - sections := make([]string, 0, len(lc)) - for _, entry := range lc { - row, err := entry.CSVRow(classifier) - if err != nil { - return "", err - } - sections = append(sections, row) - } - return strings.Join(sections, "\n"), nil -} - -func (lc LicenseCollection) Check(classifier *licenseclassifier.License) error { - errors := []string{} - for _, entry := range lc { - if err := entry.Check(classifier); err != nil { - 
errors = append(errors, err.Error()) - } - } - if len(errors) == 0 { - return nil - } - return fmt.Errorf("Errors validating licenses:\n%v", strings.Join(errors, "\n")) -} - -func CollectLicenses(imports []string) (LicenseCollection, error) { - // for each of the import paths, search for a license file. - licenseFiles := make(map[string]*LicenseFile) - for _, ip := range imports { - lf, err := findLicense(ip) - if err != nil { - return nil, err - } - licenseFiles[lf.EnclosingImportPath] = lf - } - - order := sort.StringSlice{} - for key := range licenseFiles { - order = append(order, key) - } - order.Sort() - - licenseTypes := LicenseCollection{} - for _, key := range order { - licenseTypes = append(licenseTypes, licenseFiles[key]) - } - return licenseTypes, nil -} diff --git a/vendor/github.com/knative/test-infra/tools/dep-collector/main.go b/vendor/github.com/knative/test-infra/tools/dep-collector/main.go deleted file mode 100644 index 4532942751d..00000000000 --- a/vendor/github.com/knative/test-infra/tools/dep-collector/main.go +++ /dev/null @@ -1,81 +0,0 @@ -/* -Copyright 2018 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package main - -import ( - "flag" - "log" - "os" - - "github.com/google/licenseclassifier" -) - -var WorkingDir, _ = os.Getwd() - -var ( - csv = flag.Bool("csv", false, "Whether to print in CSV format (with slow classification).") - check = flag.Bool("check", false, "Whether to just check license files for forbidden licenses.") -) - -func main() { - flag.Parse() - if flag.NArg() == 0 { - log.Fatalf("Expected a list of import paths, got: %v", flag.Args()) - } - - // Perform a simple DFS to collect the binaries' transitive dependencies. - transitiveImports, err := CollectTransitiveImports(flag.Args()) - if err != nil { - log.Fatalf("Error collecting transitive dependencies: %v", err) - } - - // Gather all of the license data from the imports. - collection, err := CollectLicenses(transitiveImports) - if err != nil { - log.Fatalf("Error identifying licenses for transitive dependencies: %v", err) - } - - if *check { - classifier, err := licenseclassifier.NewWithForbiddenLicenses(MatchThreshold) - if err != nil { - log.Fatalf("Error creating license classifier: %v", err) - } - if err := collection.Check(classifier); err != nil { - log.Fatalf("Error checking license collection: %v", err) - } - log.Printf("No errors found.") - return - } - - if *csv { - classifier, err := licenseclassifier.New(MatchThreshold) - if err != nil { - log.Fatalf("Error creating license classifier: %v", err) - } - output, err := collection.CSV(classifier) - if err != nil { - log.Fatalf("Error generating CSV: %v", err) - } - os.Stdout.Write([]byte(output)) - } else { - entries, err := collection.Entries() - if err != nil { - log.Fatalf("Error generating entries: %v", err) - } - os.Stdout.Write([]byte(entries)) - } -} diff --git a/vendor/github.com/knative/test-infra/tools/gcs/gcs.go b/vendor/github.com/knative/test-infra/tools/gcs/gcs.go deleted file mode 100644 index a41fbbb21a0..00000000000 --- a/vendor/github.com/knative/test-infra/tools/gcs/gcs.go +++ /dev/null @@ -1,112 +0,0 @@ -/* 
-Copyright 2018 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// gcs.go defines functions to use GCS - -package gcs - -import ( - "bufio" - "context" - "fmt" - "io/ioutil" - "log" - "strconv" - "strings" - - "cloud.google.com/go/storage" - "google.golang.org/api/option" -) - -const ( - bucketName = "knative-prow" - latest = "latest-build.txt" -) - -var client *storage.Client - -func createStorageClient(ctx context.Context, sa string) error { - var err error - client, err = storage.NewClient(ctx, option.WithCredentialsFile(sa)) - return err -} - -func createStorageObject(filename string) *storage.ObjectHandle { - return client.Bucket(bucketName).Object(filename) -} - -// GetLatestBuildNumber gets the latest build number for the specified log directory -func GetLatestBuildNumber(ctx context.Context, logDir string, sa string) (int, error) { - contents, err := ReadGcsFile(ctx, logDir+latest, sa) - if err != nil { - return 0, err - } - latestBuild, err := strconv.Atoi(string(contents)) - if err != nil { - return 0, err - } - - return latestBuild, nil -} - -//ReadGcsFile reads the specified file using the provided service account -func ReadGcsFile(ctx context.Context, filename string, sa string) ([]byte, error) { - // Create a new GCS client - if err := createStorageClient(ctx, sa); err != nil { - log.Fatalf("Failed to create GCS client: %v", err) - } - o := createStorageObject(filename) - if _, err := o.Attrs(ctx); err != nil { - return []byte(fmt.Sprintf("Cannot 
get attributes of '%s'", filename)), err - } - f, err := o.NewReader(ctx) - if err != nil { - return []byte(fmt.Sprintf("Cannot open '%s'", filename)), err - } - defer f.Close() - contents, err := ioutil.ReadAll(f) - if err != nil { - return []byte(fmt.Sprintf("Cannot read '%s'", filename)), err - } - return contents, nil -} - -// ParseLog parses the log and returns the lines where the checkLog func does not return an empty slice. -// checkLog function should take in the log statement and return a part from that statement that should be in the log output. -func ParseLog(ctx context.Context, filename string, checkLog func(s []string) *string) []string { - var logs []string - - log.Printf("Parsing '%s'", filename) - o := createStorageObject(filename) - if _, err := o.Attrs(ctx); err != nil { - log.Printf("Cannot get attributes of '%s', assuming not ready yet: %v", filename, err) - return nil - } - f, err := o.NewReader(ctx) - if err != nil { - log.Fatalf("Error opening '%s': %v", filename, err) - } - defer f.Close() - - scanner := bufio.NewScanner(f) - - for scanner.Scan() { - if s := checkLog(strings.Fields(scanner.Text())); s != nil { - logs = append(logs, *s) - } - } - return logs -} diff --git a/vendor/github.com/knative/test-infra/tools/githubhelper/Makefile b/vendor/github.com/knative/test-infra/tools/githubhelper/Makefile deleted file mode 100644 index c8fef33a770..00000000000 --- a/vendor/github.com/knative/test-infra/tools/githubhelper/Makefile +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright 2018 The Knative Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -all: - go get -u github.com/google/go-github/github - CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build . diff --git a/vendor/github.com/knative/test-infra/tools/githubhelper/README.md b/vendor/github.com/knative/test-infra/tools/githubhelper/README.md deleted file mode 100644 index 5975a23521b..00000000000 --- a/vendor/github.com/knative/test-infra/tools/githubhelper/README.md +++ /dev/null @@ -1,10 +0,0 @@ -# GitHub Helper Tool - -This tool is designed to interact with GitHub, providing useful data for a Prow job. Actions performed and the output are governed by the flags used. - -Currently the tool makes unauthenticated requests to GitHub API. - -## Flags - -* `-list-changed-files` will list the files that are touched by the current PR in a Prow job. -* `-verbose` will dump extra info on output when executing the comments; it is intended for debugging. diff --git a/vendor/github.com/knative/test-infra/tools/githubhelper/githubhelper.go b/vendor/github.com/knative/test-infra/tools/githubhelper/githubhelper.go deleted file mode 100644 index d45fad475cc..00000000000 --- a/vendor/github.com/knative/test-infra/tools/githubhelper/githubhelper.go +++ /dev/null @@ -1,85 +0,0 @@ -/* -Copyright 2018 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// githubhelper.go interacts with GitHub, providing useful data for a Prow job. 
- -package main - -import ( - "context" - "flag" - "fmt" - "log" - "os" - "strconv" - - "github.com/google/go-github/github" -) - -var ( - // Info about the current PR - repoOwner = os.Getenv("REPO_OWNER") - repoName = os.Getenv("REPO_NAME") - pullNumber = atoi(os.Getenv("PULL_NUMBER"), "pull number") - - // Shared useful variables - ctx = context.Background() - onePageList = &github.ListOptions{Page: 1} - verbose = false - anonymousGitHubClient *github.Client -) - -// atoi is a convenience function to convert a string to integer, failing in case of error. -func atoi(str, valueName string) int { - value, err := strconv.Atoi(str) - if err != nil { - log.Fatalf("Unexpected non number '%s' for %s: %v", str, valueName, err) - } - return value -} - -// infof if a convenience wrapper around log.Infof, and does nothing unless --verbose is passed. -func infof(template string, args ...interface{}) { - if verbose { - log.Printf(template, args...) - } -} - -// listChangedFiles simply lists the files changed by the current PR. 
-func listChangedFiles() { - infof("Listing changed files for PR %d in repository %s/%s", pullNumber, repoOwner, repoName) - files, _, err := anonymousGitHubClient.PullRequests.ListFiles(ctx, repoOwner, repoName, pullNumber, onePageList) - if err != nil { - log.Fatalf("Error listing files: %v", err) - } - for _, file := range files { - fmt.Println(*file.Filename) - } -} - -func main() { - listChangedFilesFlag := flag.Bool("list-changed-files", false, "List the files changed by the current pull request") - verboseFlag := flag.Bool("verbose", false, "Whether to dump extra info on output or not; intended for debugging") - flag.Parse() - - verbose = *verboseFlag - anonymousGitHubClient = github.NewClient(nil) - - if *listChangedFilesFlag { - listChangedFiles() - } -} - diff --git a/vendor/github.com/knative/test-infra/tools/testgrid/testgrid.go b/vendor/github.com/knative/test-infra/tools/testgrid/testgrid.go deleted file mode 100644 index 30d7ff2c13c..00000000000 --- a/vendor/github.com/knative/test-infra/tools/testgrid/testgrid.go +++ /dev/null @@ -1,69 +0,0 @@ -/* -Copyright 2018 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// testgrid.go provides methods to perform action on testgrid. 
- -package testgrid - -import ( - "encoding/xml" - "os" -) - -// TestProperty defines a property of the test -type TestProperty struct { - Name string `xml:"name,attr"` - Value float32 `xml:"value,attr"` -} - -// TestProperties is an array of test properties -type TestProperties struct { - Property []TestProperty `xml:"property"` -} - -// TestCase defines a test case that was executed -type TestCase struct { - ClassName string `xml:"class_name,attr"` - Name string `xml:"name,attr"` - Time int `xml:"time,attr"` - Properties TestProperties `xml:"properties"` - Fail bool `xml:"failure,omitempty"` -} - -// TestSuite defines the set of relevant test cases -type TestSuite struct { - XMLName xml.Name `xml:"testsuite"` - TestCases []TestCase `xml:"testcase"` -} - -// CreateXMLOutput creates the junit xml file in the provided artifacts directory -func CreateXMLOutput(ts TestSuite, artifactsDir string) error { - op, err := xml.MarshalIndent(ts, "", " ") - if err != nil { - return err - } - - outputFile := artifactsDir + "/junit_bazel.xml" - f, err := os.Create(outputFile) - if err != nil { - return err - } - defer f.Close() - if _, err := f.WriteString(string(op) + "\n"); err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/pierrec/lz4/LICENSE b/vendor/github.com/pierrec/lz4/LICENSE new file mode 100644 index 00000000000..bd899d8353d --- /dev/null +++ b/vendor/github.com/pierrec/lz4/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2015, Pierre Curto +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ +* Neither the name of xxHash nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/vendor/github.com/pierrec/lz4/block.go b/vendor/github.com/pierrec/lz4/block.go new file mode 100644 index 00000000000..00b1111b92b --- /dev/null +++ b/vendor/github.com/pierrec/lz4/block.go @@ -0,0 +1,397 @@ +package lz4 + +import ( + "encoding/binary" + "errors" +) + +var ( + // ErrInvalidSourceShortBuffer is returned by UncompressBlock or CompressBLock when a compressed + // block is corrupted or the destination buffer is not large enough for the uncompressed data. + ErrInvalidSourceShortBuffer = errors.New("lz4: invalid source or destination buffer too short") + // ErrInvalid is returned when reading an invalid LZ4 archive. + ErrInvalid = errors.New("lz4: bad magic number") +) + +// blockHash hashes 4 bytes into a value < winSize. +func blockHash(x uint32) uint32 { + const hasher uint32 = 2654435761 // Knuth multiplicative hash. + return x * hasher >> hashShift +} + +// CompressBlockBound returns the maximum size of a given buffer of size n, when not compressible. 
+func CompressBlockBound(n int) int { + return n + n/255 + 16 +} + +// UncompressBlock uncompresses the source buffer into the destination one, +// and returns the uncompressed size. +// +// The destination buffer must be sized appropriately. +// +// An error is returned if the source data is invalid or the destination buffer is too small. +func UncompressBlock(src, dst []byte) (si int, err error) { + defer func() { + // It is now faster to let the runtime panic and recover on out of bound slice access + // than checking indices as we go along. + if recover() != nil { + err = ErrInvalidSourceShortBuffer + } + }() + sn := len(src) + if sn == 0 { + return 0, nil + } + var di int + + for { + // Literals and match lengths (token). + b := int(src[si]) + si++ + + // Literals. + if lLen := b >> 4; lLen > 0 { + if lLen == 0xF { + for src[si] == 0xFF { + lLen += 0xFF + si++ + } + lLen += int(src[si]) + si++ + } + i := si + si += lLen + di += copy(dst[di:], src[i:si]) + + if si >= sn { + return di, nil + } + } + + si++ + _ = src[si] // Bound check elimination. + offset := int(src[si-1]) | int(src[si])<<8 + si++ + + // Match. + mLen := b & 0xF + if mLen == 0xF { + for src[si] == 0xFF { + mLen += 0xFF + si++ + } + mLen += int(src[si]) + si++ + } + mLen += minMatch + + // Copy the match. + i := di - offset + if offset > 0 && mLen >= offset { + // Efficiently copy the match dst[di-offset:di] into the dst slice. + bytesToCopy := offset * (mLen / offset) + expanded := dst[i:] + for n := offset; n <= bytesToCopy+offset; n *= 2 { + copy(expanded[n:], expanded[:n]) + } + di += bytesToCopy + mLen -= bytesToCopy + } + di += copy(dst[di:], dst[i:i+mLen]) + } +} + +// CompressBlock compresses the source buffer into the destination one. +// This is the fast version of LZ4 compression and also the default one. +// The size of hashTable must be at least 64Kb. +// +// The size of the compressed data is returned. If it is 0 and no error, then the data is incompressible. 
+// +// An error is returned if the destination buffer is too small. +func CompressBlock(src, dst []byte, hashTable []int) (di int, err error) { + defer func() { + if recover() != nil { + err = ErrInvalidSourceShortBuffer + } + }() + + sn, dn := len(src)-mfLimit, len(dst) + if sn <= 0 || dn == 0 { + return 0, nil + } + var si int + + // Fast scan strategy: the hash table only stores the last 4 bytes sequences. + // const accInit = 1 << skipStrength + + anchor := si // Position of the current literals. + // acc := accInit // Variable step: improves performance on non-compressible data. + + for si < sn { + // Hash the next 4 bytes (sequence)... + match := binary.LittleEndian.Uint32(src[si:]) + h := blockHash(match) + + ref := hashTable[h] + hashTable[h] = si + if ref >= sn { // Invalid reference (dirty hashtable). + si++ + continue + } + offset := si - ref + if offset <= 0 || offset >= winSize || // Out of window. + match != binary.LittleEndian.Uint32(src[ref:]) { // Hash collision on different matches. + // si += acc >> skipStrength + // acc++ + si++ + continue + } + + // Match found. + // acc = accInit + lLen := si - anchor // Literal length. + + // Encode match length part 1. + si += minMatch + mLen := si // Match length has minMatch already. + // Find the longest match, first looking by batches of 8 bytes. + for si < sn && binary.LittleEndian.Uint64(src[si:]) == binary.LittleEndian.Uint64(src[si-offset:]) { + si += 8 + } + // Then byte by byte. + for si < sn && src[si] == src[si-offset] { + si++ + } + + mLen = si - mLen + if mLen < 0xF { + dst[di] = byte(mLen) + } else { + dst[di] = 0xF + } + + // Encode literals length. + if lLen < 0xF { + dst[di] |= byte(lLen << 4) + } else { + dst[di] |= 0xF0 + di++ + l := lLen - 0xF + for ; l >= 0xFF; l -= 0xFF { + dst[di] = 0xFF + di++ + } + dst[di] = byte(l) + } + di++ + + // Literals. + copy(dst[di:], src[anchor:anchor+lLen]) + di += lLen + 2 + anchor = si + + // Encode offset. + _ = dst[di] // Bound check elimination. 
+ dst[di-2], dst[di-1] = byte(offset), byte(offset>>8) + + // Encode match length part 2. + if mLen >= 0xF { + for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF { + dst[di] = 0xFF + di++ + } + dst[di] = byte(mLen) + di++ + } + } + + if anchor == 0 { + // Incompressible. + return 0, nil + } + + // Last literals. + lLen := len(src) - anchor + if lLen < 0xF { + dst[di] = byte(lLen << 4) + } else { + dst[di] = 0xF0 + di++ + for lLen -= 0xF; lLen >= 0xFF; lLen -= 0xFF { + dst[di] = 0xFF + di++ + } + dst[di] = byte(lLen) + } + di++ + + // Write the last literals. + if di >= anchor { + // Incompressible. + return 0, nil + } + di += copy(dst[di:], src[anchor:]) + return di, nil +} + +// CompressBlockHC compresses the source buffer src into the destination dst +// with max search depth (use 0 or negative value for no max). +// +// CompressBlockHC compression ratio is better than CompressBlock but it is also slower. +// +// The size of the compressed data is returned. If it is 0 and no error, then the data is not compressible. +// +// An error is returned if the destination buffer is too small. +func CompressBlockHC(src, dst []byte, depth int) (di int, err error) { + defer func() { + if recover() != nil { + err = ErrInvalidSourceShortBuffer + } + }() + + sn, dn := len(src)-mfLimit, len(dst) + if sn <= 0 || dn == 0 { + return 0, nil + } + var si int + + // hashTable: stores the last position found for a given hash + // chaingTable: stores previous positions for a given hash + var hashTable, chainTable [winSize]int + + if depth <= 0 { + depth = winSize + } + + anchor := si + for si < sn { + // Hash the next 4 bytes (sequence). + match := binary.LittleEndian.Uint32(src[si:]) + h := blockHash(match) + + // Follow the chain until out of window and give the longest match. 
+ mLen := 0 + offset := 0 + for next, try := hashTable[h], depth; try > 0 && next > 0 && si-next < winSize; next = chainTable[next&winMask] { + // The first (mLen==0) or next byte (mLen>=minMatch) at current match length + // must match to improve on the match length. + if src[next+mLen] != src[si+mLen] { + continue + } + ml := 0 + // Compare the current position with a previous with the same hash. + for ml < sn-si && binary.LittleEndian.Uint64(src[next+ml:]) == binary.LittleEndian.Uint64(src[si+ml:]) { + ml += 8 + } + for ml < sn-si && src[next+ml] == src[si+ml] { + ml++ + } + if ml < minMatch || ml <= mLen { + // Match too small ( winStart { + winStart = ws + } + for si, ml := winStart, si+mLen; si < ml; { + match >>= 8 + match |= uint32(src[si+3]) << 24 + h := blockHash(match) + chainTable[si&winMask] = hashTable[h] + hashTable[h] = si + si++ + } + + lLen := si - anchor + si += mLen + mLen -= minMatch // Match length does not include minMatch. + + if mLen < 0xF { + dst[di] = byte(mLen) + } else { + dst[di] = 0xF + } + + // Encode literals length. + if lLen < 0xF { + dst[di] |= byte(lLen << 4) + } else { + dst[di] |= 0xF0 + di++ + l := lLen - 0xF + for ; l >= 0xFF; l -= 0xFF { + dst[di] = 0xFF + di++ + } + dst[di] = byte(l) + } + di++ + + // Literals. + copy(dst[di:], src[anchor:anchor+lLen]) + di += lLen + anchor = si + + // Encode offset. + di += 2 + dst[di-2], dst[di-1] = byte(offset), byte(offset>>8) + + // Encode match length part 2. + if mLen >= 0xF { + for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF { + dst[di] = 0xFF + di++ + } + dst[di] = byte(mLen) + di++ + } + } + + if anchor == 0 { + // Incompressible. + return 0, nil + } + + // Last literals. + lLen := len(src) - anchor + if lLen < 0xF { + dst[di] = byte(lLen << 4) + } else { + dst[di] = 0xF0 + di++ + lLen -= 0xF + for ; lLen >= 0xFF; lLen -= 0xFF { + dst[di] = 0xFF + di++ + } + dst[di] = byte(lLen) + } + di++ + + // Write the last literals. + if di >= anchor { + // Incompressible. 
+ return 0, nil + } + di += copy(dst[di:], src[anchor:]) + return di, nil +} diff --git a/vendor/github.com/pierrec/lz4/debug.go b/vendor/github.com/pierrec/lz4/debug.go new file mode 100644 index 00000000000..bc5e78d40f0 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/debug.go @@ -0,0 +1,23 @@ +// +build lz4debug + +package lz4 + +import ( + "fmt" + "os" + "path/filepath" + "runtime" +) + +const debugFlag = true + +func debug(args ...interface{}) { + _, file, line, _ := runtime.Caller(1) + file = filepath.Base(file) + + f := fmt.Sprintf("LZ4: %s:%d %s", file, line, args[0]) + if f[len(f)-1] != '\n' { + f += "\n" + } + fmt.Fprintf(os.Stderr, f, args[1:]...) +} diff --git a/vendor/github.com/pierrec/lz4/debug_stub.go b/vendor/github.com/pierrec/lz4/debug_stub.go new file mode 100644 index 00000000000..44211ad9645 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/debug_stub.go @@ -0,0 +1,7 @@ +// +build !lz4debug + +package lz4 + +const debugFlag = false + +func debug(args ...interface{}) {} diff --git a/vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go b/vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go new file mode 100644 index 00000000000..850a6fdf614 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go @@ -0,0 +1,222 @@ +// Package xxh32 implements the very fast XXH hashing algorithm (32 bits version). +// (https://github.com/Cyan4973/XXH/) +package xxh32 + +import ( + "encoding/binary" +) + +const ( + prime32_1 uint32 = 2654435761 + prime32_2 uint32 = 2246822519 + prime32_3 uint32 = 3266489917 + prime32_4 uint32 = 668265263 + prime32_5 uint32 = 374761393 + + prime32_1plus2 uint32 = 606290984 + prime32_minus1 uint32 = 1640531535 +) + +// XXHZero represents an xxhash32 object with seed 0. +type XXHZero struct { + v1 uint32 + v2 uint32 + v3 uint32 + v4 uint32 + totalLen uint64 + buf [16]byte + bufused int +} + +// Sum appends the current hash to b and returns the resulting slice. 
+// It does not change the underlying hash state. +func (xxh XXHZero) Sum(b []byte) []byte { + h32 := xxh.Sum32() + return append(b, byte(h32), byte(h32>>8), byte(h32>>16), byte(h32>>24)) +} + +// Reset resets the Hash to its initial state. +func (xxh *XXHZero) Reset() { + xxh.v1 = prime32_1plus2 + xxh.v2 = prime32_2 + xxh.v3 = 0 + xxh.v4 = prime32_minus1 + xxh.totalLen = 0 + xxh.bufused = 0 +} + +// Size returns the number of bytes returned by Sum(). +func (xxh *XXHZero) Size() int { + return 4 +} + +// BlockSize gives the minimum number of bytes accepted by Write(). +func (xxh *XXHZero) BlockSize() int { + return 1 +} + +// Write adds input bytes to the Hash. +// It never returns an error. +func (xxh *XXHZero) Write(input []byte) (int, error) { + if xxh.totalLen == 0 { + xxh.Reset() + } + n := len(input) + m := xxh.bufused + + xxh.totalLen += uint64(n) + + r := len(xxh.buf) - m + if n < r { + copy(xxh.buf[m:], input) + xxh.bufused += len(input) + return n, nil + } + + p := 0 + // Causes compiler to work directly from registers instead of stack: + v1, v2, v3, v4 := xxh.v1, xxh.v2, xxh.v3, xxh.v4 + if m > 0 { + // some data left from previous update + copy(xxh.buf[xxh.bufused:], input[:r]) + xxh.bufused += len(input) - r + + // fast rotl(13) + buf := xxh.buf[:16] // BCE hint. 
+ v1 = rol13(v1+binary.LittleEndian.Uint32(buf[:])*prime32_2) * prime32_1 + v2 = rol13(v2+binary.LittleEndian.Uint32(buf[4:])*prime32_2) * prime32_1 + v3 = rol13(v3+binary.LittleEndian.Uint32(buf[8:])*prime32_2) * prime32_1 + v4 = rol13(v4+binary.LittleEndian.Uint32(buf[12:])*prime32_2) * prime32_1 + p = r + xxh.bufused = 0 + } + + for n := n - 16; p <= n; p += 16 { + sub := input[p:][:16] //BCE hint for compiler + v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime32_2) * prime32_1 + v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime32_2) * prime32_1 + v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime32_2) * prime32_1 + v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime32_2) * prime32_1 + } + xxh.v1, xxh.v2, xxh.v3, xxh.v4 = v1, v2, v3, v4 + + copy(xxh.buf[xxh.bufused:], input[p:]) + xxh.bufused += len(input) - p + + return n, nil +} + +// Sum32 returns the 32 bits Hash value. +func (xxh *XXHZero) Sum32() uint32 { + h32 := uint32(xxh.totalLen) + if h32 >= 16 { + h32 += rol1(xxh.v1) + rol7(xxh.v2) + rol12(xxh.v3) + rol18(xxh.v4) + } else { + h32 += prime32_5 + } + + p := 0 + n := xxh.bufused + buf := xxh.buf + for n := n - 4; p <= n; p += 4 { + h32 += binary.LittleEndian.Uint32(buf[p:p+4]) * prime32_3 + h32 = rol17(h32) * prime32_4 + } + for ; p < n; p++ { + h32 += uint32(buf[p]) * prime32_5 + h32 = rol11(h32) * prime32_1 + } + + h32 ^= h32 >> 15 + h32 *= prime32_2 + h32 ^= h32 >> 13 + h32 *= prime32_3 + h32 ^= h32 >> 16 + + return h32 +} + +// ChecksumZero returns the 32bits Hash value. 
+func ChecksumZero(input []byte) uint32 { + n := len(input) + h32 := uint32(n) + + if n < 16 { + h32 += prime32_5 + } else { + v1 := prime32_1plus2 + v2 := prime32_2 + v3 := uint32(0) + v4 := prime32_minus1 + p := 0 + for n := n - 16; p <= n; p += 16 { + sub := input[p:][:16] //BCE hint for compiler + v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime32_2) * prime32_1 + v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime32_2) * prime32_1 + v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime32_2) * prime32_1 + v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime32_2) * prime32_1 + } + input = input[p:] + n -= p + h32 += rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + } + + p := 0 + for n := n - 4; p <= n; p += 4 { + h32 += binary.LittleEndian.Uint32(input[p:p+4]) * prime32_3 + h32 = rol17(h32) * prime32_4 + } + for p < n { + h32 += uint32(input[p]) * prime32_5 + h32 = rol11(h32) * prime32_1 + p++ + } + + h32 ^= h32 >> 15 + h32 *= prime32_2 + h32 ^= h32 >> 13 + h32 *= prime32_3 + h32 ^= h32 >> 16 + + return h32 +} + +// Uint32Zero hashes x with seed 0. 
+func Uint32Zero(x uint32) uint32 { + h := prime32_5 + 4 + x*prime32_3 + h = rol17(h) * prime32_4 + h ^= h >> 15 + h *= prime32_2 + h ^= h >> 13 + h *= prime32_3 + h ^= h >> 16 + return h +} + +func rol1(u uint32) uint32 { + return u<<1 | u>>31 +} + +func rol7(u uint32) uint32 { + return u<<7 | u>>25 +} + +func rol11(u uint32) uint32 { + return u<<11 | u>>21 +} + +func rol12(u uint32) uint32 { + return u<<12 | u>>20 +} + +func rol13(u uint32) uint32 { + return u<<13 | u>>19 +} + +func rol17(u uint32) uint32 { + return u<<17 | u>>15 +} + +func rol18(u uint32) uint32 { + return u<<18 | u>>14 +} diff --git a/vendor/github.com/pierrec/lz4/lz4.go b/vendor/github.com/pierrec/lz4/lz4.go new file mode 100644 index 00000000000..35802756c48 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/lz4.go @@ -0,0 +1,68 @@ +// Package lz4 implements reading and writing lz4 compressed data (a frame), +// as specified in http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html. +// +// Although the block level compression and decompression functions are exposed and are fully compatible +// with the lz4 block format definition, they are low level and should not be used directly. +// For a complete description of an lz4 compressed block, see: +// http://fastcompression.blogspot.fr/2011/05/lz4-explained.html +// +// See https://github.com/Cyan4973/lz4 for the reference C implementation. +// +package lz4 + +const ( + // Extension is the LZ4 frame file name extension + Extension = ".lz4" + // Version is the LZ4 frame format version + Version = 1 + + frameMagic uint32 = 0x184D2204 + frameSkipMagic uint32 = 0x184D2A50 + + // The following constants are used to setup the compression algorithm. 
+ minMatch = 4 // the minimum size of the match sequence size (4 bytes) + winSizeLog = 16 // LZ4 64Kb window size limit + winSize = 1 << winSizeLog + winMask = winSize - 1 // 64Kb window of previous data for dependent blocks + compressedBlockFlag = 1 << 31 + compressedBlockMask = compressedBlockFlag - 1 + + // hashLog determines the size of the hash table used to quickly find a previous match position. + // Its value influences the compression speed and memory usage, the lower the faster, + // but at the expense of the compression ratio. + // 16 seems to be the best compromise. + hashLog = 16 + hashTableSize = 1 << hashLog + hashShift = uint((minMatch * 8) - hashLog) + + mfLimit = 8 + minMatch // The last match cannot start within the last 12 bytes. + skipStrength = 6 // variable step for fast scan +) + +// map the block max size id with its value in bytes: 64Kb, 256Kb, 1Mb and 4Mb. +var ( + bsMapID = map[byte]int{4: 64 << 10, 5: 256 << 10, 6: 1 << 20, 7: 4 << 20} + bsMapValue = make(map[int]byte, len(bsMapID)) +) + +// Reversed. +func init() { + for i, v := range bsMapID { + bsMapValue[v] = i + } +} + +// Header describes the various flags that can be set on a Writer or obtained from a Reader. +// The default values match those of the LZ4 frame format definition +// (http://fastcompression.blogspot.com/2013/04/lz4-streaming-format-final.html). +// +// NB. in a Reader, in case of concatenated frames, the Header values may change between Read() calls. +// It is the caller responsibility to check them if necessary. +type Header struct { + BlockChecksum bool // Compressed blocks checksum flag. + NoChecksum bool // Frame checksum flag. + BlockMaxSize int // Size of the uncompressed data block (one of [64KB, 256KB, 1MB, 4MB]). Default=4MB. + Size uint64 // Frame total size. It is _not_ computed by the Writer. + CompressionLevel int // Compression level (higher is better, use 0 for fastest compression). + done bool // Header processed flag (Read or Write and checked). 
+} diff --git a/vendor/github.com/pierrec/lz4/lz4_go1.10.go b/vendor/github.com/pierrec/lz4/lz4_go1.10.go new file mode 100644 index 00000000000..9a0fb00709d --- /dev/null +++ b/vendor/github.com/pierrec/lz4/lz4_go1.10.go @@ -0,0 +1,29 @@ +//+build go1.10 + +package lz4 + +import ( + "fmt" + "strings" +) + +func (h Header) String() string { + var s strings.Builder + + s.WriteString(fmt.Sprintf("%T{", h)) + if h.BlockChecksum { + s.WriteString("BlockChecksum: true ") + } + if h.NoChecksum { + s.WriteString("NoChecksum: true ") + } + if bs := h.BlockMaxSize; bs != 0 && bs != 4<<20 { + s.WriteString(fmt.Sprintf("BlockMaxSize: %d ", bs)) + } + if l := h.CompressionLevel; l != 0 { + s.WriteString(fmt.Sprintf("CompressionLevel: %d ", l)) + } + s.WriteByte('}') + + return s.String() +} diff --git a/vendor/github.com/pierrec/lz4/lz4_notgo1.10.go b/vendor/github.com/pierrec/lz4/lz4_notgo1.10.go new file mode 100644 index 00000000000..12c761a2e7f --- /dev/null +++ b/vendor/github.com/pierrec/lz4/lz4_notgo1.10.go @@ -0,0 +1,29 @@ +//+build !go1.10 + +package lz4 + +import ( + "bytes" + "fmt" +) + +func (h Header) String() string { + var s bytes.Buffer + + s.WriteString(fmt.Sprintf("%T{", h)) + if h.BlockChecksum { + s.WriteString("BlockChecksum: true ") + } + if h.NoChecksum { + s.WriteString("NoChecksum: true ") + } + if bs := h.BlockMaxSize; bs != 0 && bs != 4<<20 { + s.WriteString(fmt.Sprintf("BlockMaxSize: %d ", bs)) + } + if l := h.CompressionLevel; l != 0 { + s.WriteString(fmt.Sprintf("CompressionLevel: %d ", l)) + } + s.WriteByte('}') + + return s.String() +} diff --git a/vendor/github.com/pierrec/lz4/reader.go b/vendor/github.com/pierrec/lz4/reader.go new file mode 100644 index 00000000000..f08db47df7b --- /dev/null +++ b/vendor/github.com/pierrec/lz4/reader.go @@ -0,0 +1,295 @@ +package lz4 + +import ( + "encoding/binary" + "fmt" + "io" + "io/ioutil" + + "github.com/pierrec/lz4/internal/xxh32" +) + +// Reader implements the LZ4 frame decoder. 
+// The Header is set after the first call to Read(). +// The Header may change between Read() calls in case of concatenated frames. +type Reader struct { + Header + + buf [8]byte // Scrap buffer. + pos int64 // Current position in src. + src io.Reader // Source. + zdata []byte // Compressed data. + data []byte // Uncompressed data. + idx int // Index of unread bytes into data. + checksum xxh32.XXHZero // Frame hash. +} + +// NewReader returns a new LZ4 frame decoder. +// No access to the underlying io.Reader is performed. +func NewReader(src io.Reader) *Reader { + r := &Reader{src: src} + return r +} + +// readHeader checks the frame magic number and parses the frame descriptoz. +// Skippable frames are supported even as a first frame although the LZ4 +// specifications recommends skippable frames not to be used as first frames. +func (z *Reader) readHeader(first bool) error { + defer z.checksum.Reset() + + buf := z.buf[:] + for { + magic, err := z.readUint32() + if err != nil { + z.pos += 4 + if !first && err == io.ErrUnexpectedEOF { + return io.EOF + } + return err + } + if magic == frameMagic { + break + } + if magic>>8 != frameSkipMagic>>8 { + return ErrInvalid + } + skipSize, err := z.readUint32() + if err != nil { + return err + } + z.pos += 4 + m, err := io.CopyN(ioutil.Discard, z.src, int64(skipSize)) + if err != nil { + return err + } + z.pos += m + } + + // Header. 
+ if _, err := io.ReadFull(z.src, buf[:2]); err != nil { + return err + } + z.pos += 8 + + b := buf[0] + if v := b >> 6; v != Version { + return fmt.Errorf("lz4: invalid version: got %d; expected %d", v, Version) + } + if b>>5&1 == 0 { + return fmt.Errorf("lz4: block dependency not supported") + } + z.BlockChecksum = b>>4&1 > 0 + frameSize := b>>3&1 > 0 + z.NoChecksum = b>>2&1 == 0 + + bmsID := buf[1] >> 4 & 0x7 + bSize, ok := bsMapID[bmsID] + if !ok { + return fmt.Errorf("lz4: invalid block max size ID: %d", bmsID) + } + z.BlockMaxSize = bSize + + // Allocate the compressed/uncompressed buffers. + // The compressed buffer cannot exceed the uncompressed one. + if n := 2 * bSize; cap(z.zdata) < n { + z.zdata = make([]byte, n, n) + } + if debugFlag { + debug("header block max size id=%d size=%d", bmsID, bSize) + } + z.zdata = z.zdata[:bSize] + z.data = z.zdata[:cap(z.zdata)][bSize:] + z.idx = len(z.data) + + z.checksum.Write(buf[0:2]) + + if frameSize { + buf := buf[:8] + if _, err := io.ReadFull(z.src, buf); err != nil { + return err + } + z.Size = binary.LittleEndian.Uint64(buf) + z.pos += 8 + z.checksum.Write(buf) + } + + // Header checksum. + if _, err := io.ReadFull(z.src, buf[:1]); err != nil { + return err + } + z.pos++ + if h := byte(z.checksum.Sum32() >> 8 & 0xFF); h != buf[0] { + return fmt.Errorf("lz4: invalid header checksum: got %x; expected %x", buf[0], h) + } + + z.Header.done = true + if debugFlag { + debug("header read: %v", z.Header) + } + + return nil +} + +// Read decompresses data from the underlying source into the supplied buffer. +// +// Since there can be multiple streams concatenated, Header values may +// change between calls to Read(). If that is the case, no data is actually read from +// the underlying io.Reader, to allow for potential input buffer resizing. 
+func (z *Reader) Read(buf []byte) (int, error) { + if debugFlag { + debug("Read buf len=%d", len(buf)) + } + if !z.Header.done { + if err := z.readHeader(true); err != nil { + return 0, err + } + if debugFlag { + debug("header read OK compressed buffer %d / %d uncompressed buffer %d : %d index=%d", + len(z.zdata), cap(z.zdata), len(z.data), cap(z.data), z.idx) + } + } + + if len(buf) == 0 { + return 0, nil + } + + if z.idx == len(z.data) { + // No data ready for reading, process the next block. + if debugFlag { + debug("reading block from writer") + } + // Block length: 0 = end of frame, highest bit set: uncompressed. + bLen, err := z.readUint32() + if err != nil { + return 0, err + } + z.pos += 4 + + if bLen == 0 { + // End of frame reached. + if !z.NoChecksum { + // Validate the frame checksum. + checksum, err := z.readUint32() + if err != nil { + return 0, err + } + if debugFlag { + debug("frame checksum got=%x / want=%x", z.checksum.Sum32(), checksum) + } + z.pos += 4 + if h := z.checksum.Sum32(); checksum != h { + return 0, fmt.Errorf("lz4: invalid frame checksum: got %x; expected %x", h, checksum) + } + } + + // Get ready for the next concatenated frame and keep the position. + pos := z.pos + z.Reset(z.src) + z.pos = pos + + // Since multiple frames can be concatenated, check for more. + return 0, z.readHeader(false) + } + + if debugFlag { + debug("raw block size %d", bLen) + } + if bLen&compressedBlockFlag > 0 { + // Uncompressed block. 
+ bLen &= compressedBlockMask + if debugFlag { + debug("uncompressed block size %d", bLen) + } + if int(bLen) > cap(z.data) { + return 0, fmt.Errorf("lz4: invalid block size: %d", bLen) + } + z.data = z.data[:bLen] + if _, err := io.ReadFull(z.src, z.data); err != nil { + return 0, err + } + z.pos += int64(bLen) + + if z.BlockChecksum { + checksum, err := z.readUint32() + if err != nil { + return 0, err + } + z.pos += 4 + + if h := xxh32.ChecksumZero(z.data); h != checksum { + return 0, fmt.Errorf("lz4: invalid block checksum: got %x; expected %x", h, checksum) + } + } + + } else { + // Compressed block. + if debugFlag { + debug("compressed block size %d", bLen) + } + if int(bLen) > cap(z.data) { + return 0, fmt.Errorf("lz4: invalid block size: %d", bLen) + } + zdata := z.zdata[:bLen] + if _, err := io.ReadFull(z.src, zdata); err != nil { + return 0, err + } + z.pos += int64(bLen) + + if z.BlockChecksum { + checksum, err := z.readUint32() + if err != nil { + return 0, err + } + z.pos += 4 + + if h := xxh32.ChecksumZero(zdata); h != checksum { + return 0, fmt.Errorf("lz4: invalid block checksum: got %x; expected %x", h, checksum) + } + } + + n, err := UncompressBlock(zdata, z.data) + if err != nil { + return 0, err + } + z.data = z.data[:n] + } + + if !z.NoChecksum { + z.checksum.Write(z.data) + if debugFlag { + debug("current frame checksum %x", z.checksum.Sum32()) + } + } + z.idx = 0 + } + + n := copy(buf, z.data[z.idx:]) + z.idx += n + if debugFlag { + debug("copied %d bytes to input", n) + } + + return n, nil +} + +// Reset discards the Reader's state and makes it equivalent to the +// result of its original state from NewReader, but reading from r instead. +// This permits reusing a Reader rather than allocating a new one. +func (z *Reader) Reset(r io.Reader) { + z.Header = Header{} + z.pos = 0 + z.src = r + z.zdata = z.zdata[:0] + z.data = z.data[:0] + z.idx = 0 + z.checksum.Reset() +} + +// readUint32 reads an uint32 into the supplied buffer. 
+// The idea is to make use of the already allocated buffers avoiding additional allocations. +func (z *Reader) readUint32() (uint32, error) { + buf := z.buf[:4] + _, err := io.ReadFull(z.src, buf) + x := binary.LittleEndian.Uint32(buf) + return x, err +} diff --git a/vendor/github.com/pierrec/lz4/writer.go b/vendor/github.com/pierrec/lz4/writer.go new file mode 100644 index 00000000000..0120438025d --- /dev/null +++ b/vendor/github.com/pierrec/lz4/writer.go @@ -0,0 +1,267 @@ +package lz4 + +import ( + "encoding/binary" + "fmt" + "io" + + "github.com/pierrec/lz4/internal/xxh32" +) + +// Writer implements the LZ4 frame encoder. +type Writer struct { + Header + + buf [19]byte // magic number(4) + header(flags(2)+[Size(8)+DictID(4)]+checksum(1)) does not exceed 19 bytes + dst io.Writer // Destination. + checksum xxh32.XXHZero // Frame checksum. + zdata []byte // Compressed data. + data []byte // Data to be compressed. + idx int // Index into data. + hashtable [winSize]int // Hash table used in CompressBlock(). +} + +// NewWriter returns a new LZ4 frame encoder. +// No access to the underlying io.Writer is performed. +// The supplied Header is checked at the first Write. +// It is ok to change it before the first Write but then not until a Reset() is performed. +func NewWriter(dst io.Writer) *Writer { + return &Writer{dst: dst} +} + +// writeHeader builds and writes the header (magic+header) to the underlying io.Writer. +func (z *Writer) writeHeader() error { + // Default to 4Mb if BlockMaxSize is not set. + if z.Header.BlockMaxSize == 0 { + z.Header.BlockMaxSize = bsMapID[7] + } + // The only option that needs to be validated. + bSize := z.Header.BlockMaxSize + bSizeID, ok := bsMapValue[bSize] + if !ok { + return fmt.Errorf("lz4: invalid block max size: %d", bSize) + } + // Allocate the compressed/uncompressed buffers. + // The compressed buffer cannot exceed the uncompressed one. 
+ if n := 2 * bSize; cap(z.zdata) < n { + z.zdata = make([]byte, n, n) + } + z.zdata = z.zdata[:bSize] + z.data = z.zdata[:cap(z.zdata)][bSize:] + z.idx = 0 + + // Size is optional. + buf := z.buf[:] + + // Set the fixed size data: magic number, block max size and flags. + binary.LittleEndian.PutUint32(buf[0:], frameMagic) + flg := byte(Version << 6) + flg |= 1 << 5 // No block dependency. + if z.Header.BlockChecksum { + flg |= 1 << 4 + } + if z.Header.Size > 0 { + flg |= 1 << 3 + } + if !z.Header.NoChecksum { + flg |= 1 << 2 + } + buf[4] = flg + buf[5] = bSizeID << 4 + + // Current buffer size: magic(4) + flags(1) + block max size (1). + n := 6 + // Optional items. + if z.Header.Size > 0 { + binary.LittleEndian.PutUint64(buf[n:], z.Header.Size) + n += 8 + } + + // The header checksum includes the flags, block max size and optional Size. + buf[n] = byte(xxh32.ChecksumZero(buf[4:n]) >> 8 & 0xFF) + z.checksum.Reset() + + // Header ready, write it out. + if _, err := z.dst.Write(buf[0 : n+1]); err != nil { + return err + } + z.Header.done = true + if debugFlag { + debug("wrote header %v", z.Header) + } + + return nil +} + +// Write compresses data from the supplied buffer into the underlying io.Writer. +// Write does not return until the data has been written. +func (z *Writer) Write(buf []byte) (int, error) { + if !z.Header.done { + if err := z.writeHeader(); err != nil { + return 0, err + } + } + if debugFlag { + debug("input buffer len=%d index=%d", len(buf), z.idx) + } + + zn := len(z.data) + var n int + for len(buf) > 0 { + if z.idx == 0 && len(buf) >= zn { + // Avoid a copy as there is enough data for a block. + if err := z.compressBlock(buf[:zn]); err != nil { + return n, err + } + n += zn + buf = buf[zn:] + continue + } + // Accumulate the data to be compressed. 
+ m := copy(z.data[z.idx:], buf) + n += m + z.idx += m + buf = buf[m:] + if debugFlag { + debug("%d bytes copied to buf, current index %d", n, z.idx) + } + + if z.idx < len(z.data) { + // Buffer not filled. + if debugFlag { + debug("need more data for compression") + } + return n, nil + } + + // Buffer full. + if err := z.compressBlock(z.data); err != nil { + return n, err + } + z.idx = 0 + } + + return n, nil +} + +// compressBlock compresses a block. +func (z *Writer) compressBlock(data []byte) error { + if !z.NoChecksum { + z.checksum.Write(data) + } + + // The compressed block size cannot exceed the input's. + var zn int + var err error + + if level := z.Header.CompressionLevel; level != 0 { + zn, err = CompressBlockHC(data, z.zdata, level) + } else { + zn, err = CompressBlock(data, z.zdata, z.hashtable[:]) + } + + var zdata []byte + var bLen uint32 + if debugFlag { + debug("block compression %d => %d", len(data), zn) + } + if err == nil && zn > 0 && zn < len(data) { + // Compressible and compressed size smaller than uncompressed: ok! + bLen = uint32(zn) + zdata = z.zdata[:zn] + } else { + // Uncompressed block. + bLen = uint32(len(data)) | compressedBlockFlag + zdata = data + } + if debugFlag { + debug("block compression to be written len=%d data len=%d", bLen, len(zdata)) + } + + // Write the block. + if err := z.writeUint32(bLen); err != nil { + return err + } + if _, err := z.dst.Write(zdata); err != nil { + return err + } + + if z.BlockChecksum { + checksum := xxh32.ChecksumZero(zdata) + if debugFlag { + debug("block checksum %x", checksum) + } + if err := z.writeUint32(checksum); err != nil { + return err + } + } + if debugFlag { + debug("current frame checksum %x", z.checksum.Sum32()) + } + + return nil +} + +// Flush flushes any pending compressed data to the underlying writer. +// Flush does not return until the data has been written. +// If the underlying writer returns an error, Flush returns that error. 
+func (z *Writer) Flush() error { + if debugFlag { + debug("flush with index %d", z.idx) + } + if z.idx == 0 { + return nil + } + + return z.compressBlock(z.data[:z.idx]) +} + +// Close closes the Writer, flushing any unwritten data to the underlying io.Writer, but does not close the underlying io.Writer. +func (z *Writer) Close() error { + if !z.Header.done { + if err := z.writeHeader(); err != nil { + return err + } + } + + if err := z.Flush(); err != nil { + return err + } + + if debugFlag { + debug("writing last empty block") + } + if err := z.writeUint32(0); err != nil { + return err + } + if !z.NoChecksum { + checksum := z.checksum.Sum32() + if debugFlag { + debug("stream checksum %x", checksum) + } + if err := z.writeUint32(checksum); err != nil { + return err + } + } + return nil +} + +// Reset clears the state of the Writer z such that it is equivalent to its +// initial state from NewWriter, but instead writing to w. +// No access to the underlying io.Writer is performed. +func (z *Writer) Reset(w io.Writer) { + z.Header = Header{} + z.dst = w + z.checksum.Reset() + z.zdata = z.zdata[:0] + z.data = z.data[:0] + z.idx = 0 +} + +// writeUint32 writes a uint32 to the underlying writer. +func (z *Writer) writeUint32(x uint32) error { + buf := z.buf[:4] + binary.LittleEndian.PutUint32(buf, x) + _, err := z.dst.Write(buf) + return err +} diff --git a/vendor/github.com/rcrowley/go-metrics/LICENSE b/vendor/github.com/rcrowley/go-metrics/LICENSE new file mode 100644 index 00000000000..363fa9ee77b --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/LICENSE @@ -0,0 +1,29 @@ +Copyright 2012 Richard Crowley. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. 
Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + +THIS SOFTWARE IS PROVIDED BY RICHARD CROWLEY ``AS IS'' AND ANY EXPRESS +OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL RICHARD CROWLEY OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +THE POSSIBILITY OF SUCH DAMAGE. + +The views and conclusions contained in the software and documentation +are those of the authors and should not be interpreted as representing +official policies, either expressed or implied, of Richard Crowley. diff --git a/vendor/github.com/rcrowley/go-metrics/counter.go b/vendor/github.com/rcrowley/go-metrics/counter.go new file mode 100644 index 00000000000..bb7b039cb57 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/counter.go @@ -0,0 +1,112 @@ +package metrics + +import "sync/atomic" + +// Counters hold an int64 value that can be incremented and decremented. +type Counter interface { + Clear() + Count() int64 + Dec(int64) + Inc(int64) + Snapshot() Counter +} + +// GetOrRegisterCounter returns an existing Counter or constructs and registers +// a new StandardCounter. +func GetOrRegisterCounter(name string, r Registry) Counter { + if nil == r { + r = DefaultRegistry + } + return r.GetOrRegister(name, NewCounter).(Counter) +} + +// NewCounter constructs a new StandardCounter. 
+func NewCounter() Counter { + if UseNilMetrics { + return NilCounter{} + } + return &StandardCounter{0} +} + +// NewRegisteredCounter constructs and registers a new StandardCounter. +func NewRegisteredCounter(name string, r Registry) Counter { + c := NewCounter() + if nil == r { + r = DefaultRegistry + } + r.Register(name, c) + return c +} + +// CounterSnapshot is a read-only copy of another Counter. +type CounterSnapshot int64 + +// Clear panics. +func (CounterSnapshot) Clear() { + panic("Clear called on a CounterSnapshot") +} + +// Count returns the count at the time the snapshot was taken. +func (c CounterSnapshot) Count() int64 { return int64(c) } + +// Dec panics. +func (CounterSnapshot) Dec(int64) { + panic("Dec called on a CounterSnapshot") +} + +// Inc panics. +func (CounterSnapshot) Inc(int64) { + panic("Inc called on a CounterSnapshot") +} + +// Snapshot returns the snapshot. +func (c CounterSnapshot) Snapshot() Counter { return c } + +// NilCounter is a no-op Counter. +type NilCounter struct{} + +// Clear is a no-op. +func (NilCounter) Clear() {} + +// Count is a no-op. +func (NilCounter) Count() int64 { return 0 } + +// Dec is a no-op. +func (NilCounter) Dec(i int64) {} + +// Inc is a no-op. +func (NilCounter) Inc(i int64) {} + +// Snapshot is a no-op. +func (NilCounter) Snapshot() Counter { return NilCounter{} } + +// StandardCounter is the standard implementation of a Counter and uses the +// sync/atomic package to manage a single int64 value. +type StandardCounter struct { + count int64 +} + +// Clear sets the counter to zero. +func (c *StandardCounter) Clear() { + atomic.StoreInt64(&c.count, 0) +} + +// Count returns the current count. +func (c *StandardCounter) Count() int64 { + return atomic.LoadInt64(&c.count) +} + +// Dec decrements the counter by the given amount. +func (c *StandardCounter) Dec(i int64) { + atomic.AddInt64(&c.count, -i) +} + +// Inc increments the counter by the given amount. 
+func (c *StandardCounter) Inc(i int64) { + atomic.AddInt64(&c.count, i) +} + +// Snapshot returns a read-only copy of the counter. +func (c *StandardCounter) Snapshot() Counter { + return CounterSnapshot(c.Count()) +} diff --git a/vendor/github.com/rcrowley/go-metrics/debug.go b/vendor/github.com/rcrowley/go-metrics/debug.go new file mode 100644 index 00000000000..043ccefab61 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/debug.go @@ -0,0 +1,76 @@ +package metrics + +import ( + "runtime/debug" + "time" +) + +var ( + debugMetrics struct { + GCStats struct { + LastGC Gauge + NumGC Gauge + Pause Histogram + //PauseQuantiles Histogram + PauseTotal Gauge + } + ReadGCStats Timer + } + gcStats debug.GCStats +) + +// Capture new values for the Go garbage collector statistics exported in +// debug.GCStats. This is designed to be called as a goroutine. +func CaptureDebugGCStats(r Registry, d time.Duration) { + for _ = range time.Tick(d) { + CaptureDebugGCStatsOnce(r) + } +} + +// Capture new values for the Go garbage collector statistics exported in +// debug.GCStats. This is designed to be called in a background goroutine. +// Giving a registry which has not been given to RegisterDebugGCStats will +// panic. +// +// Be careful (but much less so) with this because debug.ReadGCStats calls +// the C function runtime·lock(runtime·mheap) which, while not a stop-the-world +// operation, isn't something you want to be doing all the time. 
+func CaptureDebugGCStatsOnce(r Registry) { + lastGC := gcStats.LastGC + t := time.Now() + debug.ReadGCStats(&gcStats) + debugMetrics.ReadGCStats.UpdateSince(t) + + debugMetrics.GCStats.LastGC.Update(int64(gcStats.LastGC.UnixNano())) + debugMetrics.GCStats.NumGC.Update(int64(gcStats.NumGC)) + if lastGC != gcStats.LastGC && 0 < len(gcStats.Pause) { + debugMetrics.GCStats.Pause.Update(int64(gcStats.Pause[0])) + } + //debugMetrics.GCStats.PauseQuantiles.Update(gcStats.PauseQuantiles) + debugMetrics.GCStats.PauseTotal.Update(int64(gcStats.PauseTotal)) +} + +// Register metrics for the Go garbage collector statistics exported in +// debug.GCStats. The metrics are named by their fully-qualified Go symbols, +// i.e. debug.GCStats.PauseTotal. +func RegisterDebugGCStats(r Registry) { + debugMetrics.GCStats.LastGC = NewGauge() + debugMetrics.GCStats.NumGC = NewGauge() + debugMetrics.GCStats.Pause = NewHistogram(NewExpDecaySample(1028, 0.015)) + //debugMetrics.GCStats.PauseQuantiles = NewHistogram(NewExpDecaySample(1028, 0.015)) + debugMetrics.GCStats.PauseTotal = NewGauge() + debugMetrics.ReadGCStats = NewTimer() + + r.Register("debug.GCStats.LastGC", debugMetrics.GCStats.LastGC) + r.Register("debug.GCStats.NumGC", debugMetrics.GCStats.NumGC) + r.Register("debug.GCStats.Pause", debugMetrics.GCStats.Pause) + //r.Register("debug.GCStats.PauseQuantiles", debugMetrics.GCStats.PauseQuantiles) + r.Register("debug.GCStats.PauseTotal", debugMetrics.GCStats.PauseTotal) + r.Register("debug.ReadGCStats", debugMetrics.ReadGCStats) +} + +// Allocate an initial slice for gcStats.Pause to avoid allocations during +// normal operation. 
+func init() { + gcStats.Pause = make([]time.Duration, 11) +} diff --git a/vendor/github.com/rcrowley/go-metrics/ewma.go b/vendor/github.com/rcrowley/go-metrics/ewma.go new file mode 100644 index 00000000000..a8183dd7e21 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/ewma.go @@ -0,0 +1,138 @@ +package metrics + +import ( + "math" + "sync" + "sync/atomic" +) + +// EWMAs continuously calculate an exponentially-weighted moving average +// based on an outside source of clock ticks. +type EWMA interface { + Rate() float64 + Snapshot() EWMA + Tick() + Update(int64) +} + +// NewEWMA constructs a new EWMA with the given alpha. +func NewEWMA(alpha float64) EWMA { + if UseNilMetrics { + return NilEWMA{} + } + return &StandardEWMA{alpha: alpha} +} + +// NewEWMA1 constructs a new EWMA for a one-minute moving average. +func NewEWMA1() EWMA { + return NewEWMA(1 - math.Exp(-5.0/60.0/1)) +} + +// NewEWMA5 constructs a new EWMA for a five-minute moving average. +func NewEWMA5() EWMA { + return NewEWMA(1 - math.Exp(-5.0/60.0/5)) +} + +// NewEWMA15 constructs a new EWMA for a fifteen-minute moving average. +func NewEWMA15() EWMA { + return NewEWMA(1 - math.Exp(-5.0/60.0/15)) +} + +// EWMASnapshot is a read-only copy of another EWMA. +type EWMASnapshot float64 + +// Rate returns the rate of events per second at the time the snapshot was +// taken. +func (a EWMASnapshot) Rate() float64 { return float64(a) } + +// Snapshot returns the snapshot. +func (a EWMASnapshot) Snapshot() EWMA { return a } + +// Tick panics. +func (EWMASnapshot) Tick() { + panic("Tick called on an EWMASnapshot") +} + +// Update panics. +func (EWMASnapshot) Update(int64) { + panic("Update called on an EWMASnapshot") +} + +// NilEWMA is a no-op EWMA. +type NilEWMA struct{} + +// Rate is a no-op. +func (NilEWMA) Rate() float64 { return 0.0 } + +// Snapshot is a no-op. +func (NilEWMA) Snapshot() EWMA { return NilEWMA{} } + +// Tick is a no-op. +func (NilEWMA) Tick() {} + +// Update is a no-op. 
+func (NilEWMA) Update(n int64) {} + +// StandardEWMA is the standard implementation of an EWMA and tracks the number +// of uncounted events and processes them on each tick. It uses the +// sync/atomic package to manage uncounted events. +type StandardEWMA struct { + uncounted int64 // /!\ this should be the first member to ensure 64-bit alignment + alpha float64 + rate uint64 + init uint32 + mutex sync.Mutex +} + +// Rate returns the moving average rate of events per second. +func (a *StandardEWMA) Rate() float64 { + currentRate := math.Float64frombits(atomic.LoadUint64(&a.rate)) * float64(1e9) + return currentRate +} + +// Snapshot returns a read-only copy of the EWMA. +func (a *StandardEWMA) Snapshot() EWMA { + return EWMASnapshot(a.Rate()) +} + +// Tick ticks the clock to update the moving average. It assumes it is called +// every five seconds. +func (a *StandardEWMA) Tick() { + // Optimization to avoid mutex locking in the hot-path. + if atomic.LoadUint32(&a.init) == 1 { + a.updateRate(a.fetchInstantRate()) + } else { + // Slow-path: this is only needed on the first Tick() and preserves transactional updating + // of init and rate in the else block. The first conditional is needed below because + // a different thread could have set a.init = 1 between the time of the first atomic load and when + // the lock was acquired. + a.mutex.Lock() + if atomic.LoadUint32(&a.init) == 1 { + // The fetchInstantRate() uses atomic loading, which is unecessary in this critical section + // but again, this section is only invoked on the first successful Tick() operation. 
+ a.updateRate(a.fetchInstantRate()) + } else { + atomic.StoreUint32(&a.init, 1) + atomic.StoreUint64(&a.rate, math.Float64bits(a.fetchInstantRate())) + } + a.mutex.Unlock() + } +} + +func (a *StandardEWMA) fetchInstantRate() float64 { + count := atomic.LoadInt64(&a.uncounted) + atomic.AddInt64(&a.uncounted, -count) + instantRate := float64(count) / float64(5e9) + return instantRate +} + +func (a *StandardEWMA) updateRate(instantRate float64) { + currentRate := math.Float64frombits(atomic.LoadUint64(&a.rate)) + currentRate += a.alpha * (instantRate - currentRate) + atomic.StoreUint64(&a.rate, math.Float64bits(currentRate)) +} + +// Update adds n uncounted events. +func (a *StandardEWMA) Update(n int64) { + atomic.AddInt64(&a.uncounted, n) +} diff --git a/vendor/github.com/rcrowley/go-metrics/gauge.go b/vendor/github.com/rcrowley/go-metrics/gauge.go new file mode 100644 index 00000000000..cb57a93889f --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/gauge.go @@ -0,0 +1,120 @@ +package metrics + +import "sync/atomic" + +// Gauges hold an int64 value that can be set arbitrarily. +type Gauge interface { + Snapshot() Gauge + Update(int64) + Value() int64 +} + +// GetOrRegisterGauge returns an existing Gauge or constructs and registers a +// new StandardGauge. +func GetOrRegisterGauge(name string, r Registry) Gauge { + if nil == r { + r = DefaultRegistry + } + return r.GetOrRegister(name, NewGauge).(Gauge) +} + +// NewGauge constructs a new StandardGauge. +func NewGauge() Gauge { + if UseNilMetrics { + return NilGauge{} + } + return &StandardGauge{0} +} + +// NewRegisteredGauge constructs and registers a new StandardGauge. +func NewRegisteredGauge(name string, r Registry) Gauge { + c := NewGauge() + if nil == r { + r = DefaultRegistry + } + r.Register(name, c) + return c +} + +// NewFunctionalGauge constructs a new FunctionalGauge. 
+func NewFunctionalGauge(f func() int64) Gauge { + if UseNilMetrics { + return NilGauge{} + } + return &FunctionalGauge{value: f} +} + +// NewRegisteredFunctionalGauge constructs and registers a new StandardGauge. +func NewRegisteredFunctionalGauge(name string, r Registry, f func() int64) Gauge { + c := NewFunctionalGauge(f) + if nil == r { + r = DefaultRegistry + } + r.Register(name, c) + return c +} + +// GaugeSnapshot is a read-only copy of another Gauge. +type GaugeSnapshot int64 + +// Snapshot returns the snapshot. +func (g GaugeSnapshot) Snapshot() Gauge { return g } + +// Update panics. +func (GaugeSnapshot) Update(int64) { + panic("Update called on a GaugeSnapshot") +} + +// Value returns the value at the time the snapshot was taken. +func (g GaugeSnapshot) Value() int64 { return int64(g) } + +// NilGauge is a no-op Gauge. +type NilGauge struct{} + +// Snapshot is a no-op. +func (NilGauge) Snapshot() Gauge { return NilGauge{} } + +// Update is a no-op. +func (NilGauge) Update(v int64) {} + +// Value is a no-op. +func (NilGauge) Value() int64 { return 0 } + +// StandardGauge is the standard implementation of a Gauge and uses the +// sync/atomic package to manage a single int64 value. +type StandardGauge struct { + value int64 +} + +// Snapshot returns a read-only copy of the gauge. +func (g *StandardGauge) Snapshot() Gauge { + return GaugeSnapshot(g.Value()) +} + +// Update updates the gauge's value. +func (g *StandardGauge) Update(v int64) { + atomic.StoreInt64(&g.value, v) +} + +// Value returns the gauge's current value. +func (g *StandardGauge) Value() int64 { + return atomic.LoadInt64(&g.value) +} + +// FunctionalGauge returns value from given function +type FunctionalGauge struct { + value func() int64 +} + +// Value returns the gauge's current value. +func (g FunctionalGauge) Value() int64 { + return g.value() +} + +// Snapshot returns the snapshot. +func (g FunctionalGauge) Snapshot() Gauge { return GaugeSnapshot(g.Value()) } + +// Update panics. 
+func (FunctionalGauge) Update(int64) { + panic("Update called on a FunctionalGauge") +} diff --git a/vendor/github.com/rcrowley/go-metrics/gauge_float64.go b/vendor/github.com/rcrowley/go-metrics/gauge_float64.go new file mode 100644 index 00000000000..3962e6db09a --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/gauge_float64.go @@ -0,0 +1,125 @@ +package metrics + +import ( + "math" + "sync/atomic" +) + +// GaugeFloat64s hold a float64 value that can be set arbitrarily. +type GaugeFloat64 interface { + Snapshot() GaugeFloat64 + Update(float64) + Value() float64 +} + +// GetOrRegisterGaugeFloat64 returns an existing GaugeFloat64 or constructs and registers a +// new StandardGaugeFloat64. +func GetOrRegisterGaugeFloat64(name string, r Registry) GaugeFloat64 { + if nil == r { + r = DefaultRegistry + } + return r.GetOrRegister(name, NewGaugeFloat64()).(GaugeFloat64) +} + +// NewGaugeFloat64 constructs a new StandardGaugeFloat64. +func NewGaugeFloat64() GaugeFloat64 { + if UseNilMetrics { + return NilGaugeFloat64{} + } + return &StandardGaugeFloat64{ + value: 0.0, + } +} + +// NewRegisteredGaugeFloat64 constructs and registers a new StandardGaugeFloat64. +func NewRegisteredGaugeFloat64(name string, r Registry) GaugeFloat64 { + c := NewGaugeFloat64() + if nil == r { + r = DefaultRegistry + } + r.Register(name, c) + return c +} + +// NewFunctionalGauge constructs a new FunctionalGauge. +func NewFunctionalGaugeFloat64(f func() float64) GaugeFloat64 { + if UseNilMetrics { + return NilGaugeFloat64{} + } + return &FunctionalGaugeFloat64{value: f} +} + +// NewRegisteredFunctionalGauge constructs and registers a new StandardGauge. +func NewRegisteredFunctionalGaugeFloat64(name string, r Registry, f func() float64) GaugeFloat64 { + c := NewFunctionalGaugeFloat64(f) + if nil == r { + r = DefaultRegistry + } + r.Register(name, c) + return c +} + +// GaugeFloat64Snapshot is a read-only copy of another GaugeFloat64. 
+type GaugeFloat64Snapshot float64 + +// Snapshot returns the snapshot. +func (g GaugeFloat64Snapshot) Snapshot() GaugeFloat64 { return g } + +// Update panics. +func (GaugeFloat64Snapshot) Update(float64) { + panic("Update called on a GaugeFloat64Snapshot") +} + +// Value returns the value at the time the snapshot was taken. +func (g GaugeFloat64Snapshot) Value() float64 { return float64(g) } + +// NilGauge is a no-op Gauge. +type NilGaugeFloat64 struct{} + +// Snapshot is a no-op. +func (NilGaugeFloat64) Snapshot() GaugeFloat64 { return NilGaugeFloat64{} } + +// Update is a no-op. +func (NilGaugeFloat64) Update(v float64) {} + +// Value is a no-op. +func (NilGaugeFloat64) Value() float64 { return 0.0 } + +// StandardGaugeFloat64 is the standard implementation of a GaugeFloat64 and uses +// sync.Mutex to manage a single float64 value. +type StandardGaugeFloat64 struct { + value uint64 +} + +// Snapshot returns a read-only copy of the gauge. +func (g *StandardGaugeFloat64) Snapshot() GaugeFloat64 { + return GaugeFloat64Snapshot(g.Value()) +} + +// Update updates the gauge's value. +func (g *StandardGaugeFloat64) Update(v float64) { + atomic.StoreUint64(&g.value, math.Float64bits(v)) +} + +// Value returns the gauge's current value. +func (g *StandardGaugeFloat64) Value() float64 { + return math.Float64frombits(atomic.LoadUint64(&g.value)) +} + +// FunctionalGaugeFloat64 returns value from given function +type FunctionalGaugeFloat64 struct { + value func() float64 +} + +// Value returns the gauge's current value. +func (g FunctionalGaugeFloat64) Value() float64 { + return g.value() +} + +// Snapshot returns the snapshot. +func (g FunctionalGaugeFloat64) Snapshot() GaugeFloat64 { return GaugeFloat64Snapshot(g.Value()) } + +// Update panics. 
+func (FunctionalGaugeFloat64) Update(float64) { + panic("Update called on a FunctionalGaugeFloat64") +} diff --git a/vendor/github.com/rcrowley/go-metrics/graphite.go b/vendor/github.com/rcrowley/go-metrics/graphite.go new file mode 100644 index 00000000000..abd0a7d2918 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/graphite.go @@ -0,0 +1,113 @@ +package metrics + +import ( + "bufio" + "fmt" + "log" + "net" + "strconv" + "strings" + "time" +) + +// GraphiteConfig provides a container with configuration parameters for +// the Graphite exporter +type GraphiteConfig struct { + Addr *net.TCPAddr // Network address to connect to + Registry Registry // Registry to be exported + FlushInterval time.Duration // Flush interval + DurationUnit time.Duration // Time conversion unit for durations + Prefix string // Prefix to be prepended to metric names + Percentiles []float64 // Percentiles to export from timers and histograms +} + +// Graphite is a blocking exporter function which reports metrics in r +// to a graphite server located at addr, flushing them every d duration +// and prepending metric names with prefix. +func Graphite(r Registry, d time.Duration, prefix string, addr *net.TCPAddr) { + GraphiteWithConfig(GraphiteConfig{ + Addr: addr, + Registry: r, + FlushInterval: d, + DurationUnit: time.Nanosecond, + Prefix: prefix, + Percentiles: []float64{0.5, 0.75, 0.95, 0.99, 0.999}, + }) +} + +// GraphiteWithConfig is a blocking exporter function just like Graphite, +// but it takes a GraphiteConfig instead. +func GraphiteWithConfig(c GraphiteConfig) { + log.Printf("WARNING: This go-metrics client has been DEPRECATED! 
It has been moved to https://github.com/cyberdelia/go-metrics-graphite and will be removed from rcrowley/go-metrics on August 12th 2015") + for _ = range time.Tick(c.FlushInterval) { + if err := graphite(&c); nil != err { + log.Println(err) + } + } +} + +// GraphiteOnce performs a single submission to Graphite, returning a +// non-nil error on failed connections. This can be used in a loop +// similar to GraphiteWithConfig for custom error handling. +func GraphiteOnce(c GraphiteConfig) error { + log.Printf("WARNING: This go-metrics client has been DEPRECATED! It has been moved to https://github.com/cyberdelia/go-metrics-graphite and will be removed from rcrowley/go-metrics on August 12th 2015") + return graphite(&c) +} + +func graphite(c *GraphiteConfig) error { + now := time.Now().Unix() + du := float64(c.DurationUnit) + conn, err := net.DialTCP("tcp", nil, c.Addr) + if nil != err { + return err + } + defer conn.Close() + w := bufio.NewWriter(conn) + c.Registry.Each(func(name string, i interface{}) { + switch metric := i.(type) { + case Counter: + fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, metric.Count(), now) + case Gauge: + fmt.Fprintf(w, "%s.%s.value %d %d\n", c.Prefix, name, metric.Value(), now) + case GaugeFloat64: + fmt.Fprintf(w, "%s.%s.value %f %d\n", c.Prefix, name, metric.Value(), now) + case Histogram: + h := metric.Snapshot() + ps := h.Percentiles(c.Percentiles) + fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, h.Count(), now) + fmt.Fprintf(w, "%s.%s.min %d %d\n", c.Prefix, name, h.Min(), now) + fmt.Fprintf(w, "%s.%s.max %d %d\n", c.Prefix, name, h.Max(), now) + fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, h.Mean(), now) + fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", c.Prefix, name, h.StdDev(), now) + for psIdx, psKey := range c.Percentiles { + key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1) + fmt.Fprintf(w, "%s.%s.%s-percentile %.2f %d\n", c.Prefix, name, key, ps[psIdx], now) + } + case Meter: 
+ m := metric.Snapshot() + fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, m.Count(), now) + fmt.Fprintf(w, "%s.%s.one-minute %.2f %d\n", c.Prefix, name, m.Rate1(), now) + fmt.Fprintf(w, "%s.%s.five-minute %.2f %d\n", c.Prefix, name, m.Rate5(), now) + fmt.Fprintf(w, "%s.%s.fifteen-minute %.2f %d\n", c.Prefix, name, m.Rate15(), now) + fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, m.RateMean(), now) + case Timer: + t := metric.Snapshot() + ps := t.Percentiles(c.Percentiles) + fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, t.Count(), now) + fmt.Fprintf(w, "%s.%s.min %d %d\n", c.Prefix, name, t.Min()/int64(du), now) + fmt.Fprintf(w, "%s.%s.max %d %d\n", c.Prefix, name, t.Max()/int64(du), now) + fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, t.Mean()/du, now) + fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", c.Prefix, name, t.StdDev()/du, now) + for psIdx, psKey := range c.Percentiles { + key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1) + fmt.Fprintf(w, "%s.%s.%s-percentile %.2f %d\n", c.Prefix, name, key, ps[psIdx], now) + } + fmt.Fprintf(w, "%s.%s.one-minute %.2f %d\n", c.Prefix, name, t.Rate1(), now) + fmt.Fprintf(w, "%s.%s.five-minute %.2f %d\n", c.Prefix, name, t.Rate5(), now) + fmt.Fprintf(w, "%s.%s.fifteen-minute %.2f %d\n", c.Prefix, name, t.Rate15(), now) + fmt.Fprintf(w, "%s.%s.mean-rate %.2f %d\n", c.Prefix, name, t.RateMean(), now) + } + w.Flush() + }) + return nil +} diff --git a/vendor/github.com/rcrowley/go-metrics/healthcheck.go b/vendor/github.com/rcrowley/go-metrics/healthcheck.go new file mode 100644 index 00000000000..445131caee5 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/healthcheck.go @@ -0,0 +1,61 @@ +package metrics + +// Healthchecks hold an error value describing an arbitrary up/down status. 
+type Healthcheck interface { + Check() + Error() error + Healthy() + Unhealthy(error) +} + +// NewHealthcheck constructs a new Healthcheck which will use the given +// function to update its status. +func NewHealthcheck(f func(Healthcheck)) Healthcheck { + if UseNilMetrics { + return NilHealthcheck{} + } + return &StandardHealthcheck{nil, f} +} + +// NilHealthcheck is a no-op. +type NilHealthcheck struct{} + +// Check is a no-op. +func (NilHealthcheck) Check() {} + +// Error is a no-op. +func (NilHealthcheck) Error() error { return nil } + +// Healthy is a no-op. +func (NilHealthcheck) Healthy() {} + +// Unhealthy is a no-op. +func (NilHealthcheck) Unhealthy(error) {} + +// StandardHealthcheck is the standard implementation of a Healthcheck and +// stores the status and a function to call to update the status. +type StandardHealthcheck struct { + err error + f func(Healthcheck) +} + +// Check runs the healthcheck function to update the healthcheck's status. +func (h *StandardHealthcheck) Check() { + h.f(h) +} + +// Error returns the healthcheck's status, which will be nil if it is healthy. +func (h *StandardHealthcheck) Error() error { + return h.err +} + +// Healthy marks the healthcheck as healthy. +func (h *StandardHealthcheck) Healthy() { + h.err = nil +} + +// Unhealthy marks the healthcheck as unhealthy. The error is stored and +// may be retrieved by the Error method. +func (h *StandardHealthcheck) Unhealthy(err error) { + h.err = err +} diff --git a/vendor/github.com/rcrowley/go-metrics/histogram.go b/vendor/github.com/rcrowley/go-metrics/histogram.go new file mode 100644 index 00000000000..dbc837fe4d9 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/histogram.go @@ -0,0 +1,202 @@ +package metrics + +// Histograms calculate distribution statistics from a series of int64 values. 
+type Histogram interface { + Clear() + Count() int64 + Max() int64 + Mean() float64 + Min() int64 + Percentile(float64) float64 + Percentiles([]float64) []float64 + Sample() Sample + Snapshot() Histogram + StdDev() float64 + Sum() int64 + Update(int64) + Variance() float64 +} + +// GetOrRegisterHistogram returns an existing Histogram or constructs and +// registers a new StandardHistogram. +func GetOrRegisterHistogram(name string, r Registry, s Sample) Histogram { + if nil == r { + r = DefaultRegistry + } + return r.GetOrRegister(name, func() Histogram { return NewHistogram(s) }).(Histogram) +} + +// NewHistogram constructs a new StandardHistogram from a Sample. +func NewHistogram(s Sample) Histogram { + if UseNilMetrics { + return NilHistogram{} + } + return &StandardHistogram{sample: s} +} + +// NewRegisteredHistogram constructs and registers a new StandardHistogram from +// a Sample. +func NewRegisteredHistogram(name string, r Registry, s Sample) Histogram { + c := NewHistogram(s) + if nil == r { + r = DefaultRegistry + } + r.Register(name, c) + return c +} + +// HistogramSnapshot is a read-only copy of another Histogram. +type HistogramSnapshot struct { + sample *SampleSnapshot +} + +// Clear panics. +func (*HistogramSnapshot) Clear() { + panic("Clear called on a HistogramSnapshot") +} + +// Count returns the number of samples recorded at the time the snapshot was +// taken. +func (h *HistogramSnapshot) Count() int64 { return h.sample.Count() } + +// Max returns the maximum value in the sample at the time the snapshot was +// taken. +func (h *HistogramSnapshot) Max() int64 { return h.sample.Max() } + +// Mean returns the mean of the values in the sample at the time the snapshot +// was taken. +func (h *HistogramSnapshot) Mean() float64 { return h.sample.Mean() } + +// Min returns the minimum value in the sample at the time the snapshot was +// taken. 
+func (h *HistogramSnapshot) Min() int64 { return h.sample.Min() } + +// Percentile returns an arbitrary percentile of values in the sample at the +// time the snapshot was taken. +func (h *HistogramSnapshot) Percentile(p float64) float64 { + return h.sample.Percentile(p) +} + +// Percentiles returns a slice of arbitrary percentiles of values in the sample +// at the time the snapshot was taken. +func (h *HistogramSnapshot) Percentiles(ps []float64) []float64 { + return h.sample.Percentiles(ps) +} + +// Sample returns the Sample underlying the histogram. +func (h *HistogramSnapshot) Sample() Sample { return h.sample } + +// Snapshot returns the snapshot. +func (h *HistogramSnapshot) Snapshot() Histogram { return h } + +// StdDev returns the standard deviation of the values in the sample at the +// time the snapshot was taken. +func (h *HistogramSnapshot) StdDev() float64 { return h.sample.StdDev() } + +// Sum returns the sum in the sample at the time the snapshot was taken. +func (h *HistogramSnapshot) Sum() int64 { return h.sample.Sum() } + +// Update panics. +func (*HistogramSnapshot) Update(int64) { + panic("Update called on a HistogramSnapshot") +} + +// Variance returns the variance of inputs at the time the snapshot was taken. +func (h *HistogramSnapshot) Variance() float64 { return h.sample.Variance() } + +// NilHistogram is a no-op Histogram. +type NilHistogram struct{} + +// Clear is a no-op. +func (NilHistogram) Clear() {} + +// Count is a no-op. +func (NilHistogram) Count() int64 { return 0 } + +// Max is a no-op. +func (NilHistogram) Max() int64 { return 0 } + +// Mean is a no-op. +func (NilHistogram) Mean() float64 { return 0.0 } + +// Min is a no-op. +func (NilHistogram) Min() int64 { return 0 } + +// Percentile is a no-op. +func (NilHistogram) Percentile(p float64) float64 { return 0.0 } + +// Percentiles is a no-op. +func (NilHistogram) Percentiles(ps []float64) []float64 { + return make([]float64, len(ps)) +} + +// Sample is a no-op. 
+func (NilHistogram) Sample() Sample { return NilSample{} } + +// Snapshot is a no-op. +func (NilHistogram) Snapshot() Histogram { return NilHistogram{} } + +// StdDev is a no-op. +func (NilHistogram) StdDev() float64 { return 0.0 } + +// Sum is a no-op. +func (NilHistogram) Sum() int64 { return 0 } + +// Update is a no-op. +func (NilHistogram) Update(v int64) {} + +// Variance is a no-op. +func (NilHistogram) Variance() float64 { return 0.0 } + +// StandardHistogram is the standard implementation of a Histogram and uses a +// Sample to bound its memory use. +type StandardHistogram struct { + sample Sample +} + +// Clear clears the histogram and its sample. +func (h *StandardHistogram) Clear() { h.sample.Clear() } + +// Count returns the number of samples recorded since the histogram was last +// cleared. +func (h *StandardHistogram) Count() int64 { return h.sample.Count() } + +// Max returns the maximum value in the sample. +func (h *StandardHistogram) Max() int64 { return h.sample.Max() } + +// Mean returns the mean of the values in the sample. +func (h *StandardHistogram) Mean() float64 { return h.sample.Mean() } + +// Min returns the minimum value in the sample. +func (h *StandardHistogram) Min() int64 { return h.sample.Min() } + +// Percentile returns an arbitrary percentile of the values in the sample. +func (h *StandardHistogram) Percentile(p float64) float64 { + return h.sample.Percentile(p) +} + +// Percentiles returns a slice of arbitrary percentiles of the values in the +// sample. +func (h *StandardHistogram) Percentiles(ps []float64) []float64 { + return h.sample.Percentiles(ps) +} + +// Sample returns the Sample underlying the histogram. +func (h *StandardHistogram) Sample() Sample { return h.sample } + +// Snapshot returns a read-only copy of the histogram. 
+func (h *StandardHistogram) Snapshot() Histogram { + return &HistogramSnapshot{sample: h.sample.Snapshot().(*SampleSnapshot)} +} + +// StdDev returns the standard deviation of the values in the sample. +func (h *StandardHistogram) StdDev() float64 { return h.sample.StdDev() } + +// Sum returns the sum in the sample. +func (h *StandardHistogram) Sum() int64 { return h.sample.Sum() } + +// Update samples a new value. +func (h *StandardHistogram) Update(v int64) { h.sample.Update(v) } + +// Variance returns the variance of the values in the sample. +func (h *StandardHistogram) Variance() float64 { return h.sample.Variance() } diff --git a/vendor/github.com/rcrowley/go-metrics/json.go b/vendor/github.com/rcrowley/go-metrics/json.go new file mode 100644 index 00000000000..174b9477e92 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/json.go @@ -0,0 +1,31 @@ +package metrics + +import ( + "encoding/json" + "io" + "time" +) + +// MarshalJSON returns a byte slice containing a JSON representation of all +// the metrics in the Registry. +func (r *StandardRegistry) MarshalJSON() ([]byte, error) { + return json.Marshal(r.GetAll()) +} + +// WriteJSON writes metrics from the given registry periodically to the +// specified io.Writer as JSON. +func WriteJSON(r Registry, d time.Duration, w io.Writer) { + for _ = range time.Tick(d) { + WriteJSONOnce(r, w) + } +} + +// WriteJSONOnce writes metrics from the given registry to the specified +// io.Writer as JSON. 
+func WriteJSONOnce(r Registry, w io.Writer) { + json.NewEncoder(w).Encode(r) +} + +func (p *PrefixedRegistry) MarshalJSON() ([]byte, error) { + return json.Marshal(p.GetAll()) +} diff --git a/vendor/github.com/rcrowley/go-metrics/log.go b/vendor/github.com/rcrowley/go-metrics/log.go new file mode 100644 index 00000000000..f8074c04576 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/log.go @@ -0,0 +1,80 @@ +package metrics + +import ( + "time" +) + +type Logger interface { + Printf(format string, v ...interface{}) +} + +func Log(r Registry, freq time.Duration, l Logger) { + LogScaled(r, freq, time.Nanosecond, l) +} + +// Output each metric in the given registry periodically using the given +// logger. Print timings in `scale` units (eg time.Millisecond) rather than nanos. +func LogScaled(r Registry, freq time.Duration, scale time.Duration, l Logger) { + du := float64(scale) + duSuffix := scale.String()[1:] + + for _ = range time.Tick(freq) { + r.Each(func(name string, i interface{}) { + switch metric := i.(type) { + case Counter: + l.Printf("counter %s\n", name) + l.Printf(" count: %9d\n", metric.Count()) + case Gauge: + l.Printf("gauge %s\n", name) + l.Printf(" value: %9d\n", metric.Value()) + case GaugeFloat64: + l.Printf("gauge %s\n", name) + l.Printf(" value: %f\n", metric.Value()) + case Healthcheck: + metric.Check() + l.Printf("healthcheck %s\n", name) + l.Printf(" error: %v\n", metric.Error()) + case Histogram: + h := metric.Snapshot() + ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) + l.Printf("histogram %s\n", name) + l.Printf(" count: %9d\n", h.Count()) + l.Printf(" min: %9d\n", h.Min()) + l.Printf(" max: %9d\n", h.Max()) + l.Printf(" mean: %12.2f\n", h.Mean()) + l.Printf(" stddev: %12.2f\n", h.StdDev()) + l.Printf(" median: %12.2f\n", ps[0]) + l.Printf(" 75%%: %12.2f\n", ps[1]) + l.Printf(" 95%%: %12.2f\n", ps[2]) + l.Printf(" 99%%: %12.2f\n", ps[3]) + l.Printf(" 99.9%%: %12.2f\n", ps[4]) + case Meter: + m := metric.Snapshot() + 
l.Printf("meter %s\n", name) + l.Printf(" count: %9d\n", m.Count()) + l.Printf(" 1-min rate: %12.2f\n", m.Rate1()) + l.Printf(" 5-min rate: %12.2f\n", m.Rate5()) + l.Printf(" 15-min rate: %12.2f\n", m.Rate15()) + l.Printf(" mean rate: %12.2f\n", m.RateMean()) + case Timer: + t := metric.Snapshot() + ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) + l.Printf("timer %s\n", name) + l.Printf(" count: %9d\n", t.Count()) + l.Printf(" min: %12.2f%s\n", float64(t.Min())/du, duSuffix) + l.Printf(" max: %12.2f%s\n", float64(t.Max())/du, duSuffix) + l.Printf(" mean: %12.2f%s\n", t.Mean()/du, duSuffix) + l.Printf(" stddev: %12.2f%s\n", t.StdDev()/du, duSuffix) + l.Printf(" median: %12.2f%s\n", ps[0]/du, duSuffix) + l.Printf(" 75%%: %12.2f%s\n", ps[1]/du, duSuffix) + l.Printf(" 95%%: %12.2f%s\n", ps[2]/du, duSuffix) + l.Printf(" 99%%: %12.2f%s\n", ps[3]/du, duSuffix) + l.Printf(" 99.9%%: %12.2f%s\n", ps[4]/du, duSuffix) + l.Printf(" 1-min rate: %12.2f\n", t.Rate1()) + l.Printf(" 5-min rate: %12.2f\n", t.Rate5()) + l.Printf(" 15-min rate: %12.2f\n", t.Rate15()) + l.Printf(" mean rate: %12.2f\n", t.RateMean()) + } + }) + } +} diff --git a/vendor/github.com/rcrowley/go-metrics/meter.go b/vendor/github.com/rcrowley/go-metrics/meter.go new file mode 100644 index 00000000000..223669bcb29 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/meter.go @@ -0,0 +1,251 @@ +package metrics + +import ( + "math" + "sync" + "sync/atomic" + "time" +) + +// Meters count events to produce exponentially-weighted moving average rates +// at one-, five-, and fifteen-minutes and a mean rate. +type Meter interface { + Count() int64 + Mark(int64) + Rate1() float64 + Rate5() float64 + Rate15() float64 + RateMean() float64 + Snapshot() Meter + Stop() +} + +// GetOrRegisterMeter returns an existing Meter or constructs and registers a +// new StandardMeter. +// Be sure to unregister the meter from the registry once it is of no use to +// allow for garbage collection. 
+func GetOrRegisterMeter(name string, r Registry) Meter { + if nil == r { + r = DefaultRegistry + } + return r.GetOrRegister(name, NewMeter).(Meter) +} + +// NewMeter constructs a new StandardMeter and launches a goroutine. +// Be sure to call Stop() once the meter is of no use to allow for garbage collection. +func NewMeter() Meter { + if UseNilMetrics { + return NilMeter{} + } + m := newStandardMeter() + arbiter.Lock() + defer arbiter.Unlock() + arbiter.meters[m] = struct{}{} + if !arbiter.started { + arbiter.started = true + go arbiter.tick() + } + return m +} + +// NewMeter constructs and registers a new StandardMeter and launches a +// goroutine. +// Be sure to unregister the meter from the registry once it is of no use to +// allow for garbage collection. +func NewRegisteredMeter(name string, r Registry) Meter { + c := NewMeter() + if nil == r { + r = DefaultRegistry + } + r.Register(name, c) + return c +} + +// MeterSnapshot is a read-only copy of another Meter. +type MeterSnapshot struct { + count int64 + rate1, rate5, rate15, rateMean uint64 +} + +// Count returns the count of events at the time the snapshot was taken. +func (m *MeterSnapshot) Count() int64 { return m.count } + +// Mark panics. +func (*MeterSnapshot) Mark(n int64) { + panic("Mark called on a MeterSnapshot") +} + +// Rate1 returns the one-minute moving average rate of events per second at the +// time the snapshot was taken. +func (m *MeterSnapshot) Rate1() float64 { return math.Float64frombits(m.rate1) } + +// Rate5 returns the five-minute moving average rate of events per second at +// the time the snapshot was taken. +func (m *MeterSnapshot) Rate5() float64 { return math.Float64frombits(m.rate5) } + +// Rate15 returns the fifteen-minute moving average rate of events per second +// at the time the snapshot was taken. 
+func (m *MeterSnapshot) Rate15() float64 { return math.Float64frombits(m.rate15) } + +// RateMean returns the meter's mean rate of events per second at the time the +// snapshot was taken. +func (m *MeterSnapshot) RateMean() float64 { return math.Float64frombits(m.rateMean) } + +// Snapshot returns the snapshot. +func (m *MeterSnapshot) Snapshot() Meter { return m } + +// Stop is a no-op. +func (m *MeterSnapshot) Stop() {} + +// NilMeter is a no-op Meter. +type NilMeter struct{} + +// Count is a no-op. +func (NilMeter) Count() int64 { return 0 } + +// Mark is a no-op. +func (NilMeter) Mark(n int64) {} + +// Rate1 is a no-op. +func (NilMeter) Rate1() float64 { return 0.0 } + +// Rate5 is a no-op. +func (NilMeter) Rate5() float64 { return 0.0 } + +// Rate15is a no-op. +func (NilMeter) Rate15() float64 { return 0.0 } + +// RateMean is a no-op. +func (NilMeter) RateMean() float64 { return 0.0 } + +// Snapshot is a no-op. +func (NilMeter) Snapshot() Meter { return NilMeter{} } + +// Stop is a no-op. +func (NilMeter) Stop() {} + +// StandardMeter is the standard implementation of a Meter. +type StandardMeter struct { + snapshot *MeterSnapshot + a1, a5, a15 EWMA + startTime time.Time + stopped uint32 +} + +func newStandardMeter() *StandardMeter { + return &StandardMeter{ + snapshot: &MeterSnapshot{}, + a1: NewEWMA1(), + a5: NewEWMA5(), + a15: NewEWMA15(), + startTime: time.Now(), + } +} + +// Stop stops the meter, Mark() will be a no-op if you use it after being stopped. +func (m *StandardMeter) Stop() { + if atomic.CompareAndSwapUint32(&m.stopped, 0, 1) { + arbiter.Lock() + delete(arbiter.meters, m) + arbiter.Unlock() + } +} + +// Count returns the number of events recorded. +func (m *StandardMeter) Count() int64 { + return atomic.LoadInt64(&m.snapshot.count) +} + +// Mark records the occurance of n events. 
+func (m *StandardMeter) Mark(n int64) { + if atomic.LoadUint32(&m.stopped) == 1 { + return + } + + atomic.AddInt64(&m.snapshot.count, n) + + m.a1.Update(n) + m.a5.Update(n) + m.a15.Update(n) + m.updateSnapshot() +} + +// Rate1 returns the one-minute moving average rate of events per second. +func (m *StandardMeter) Rate1() float64 { + return math.Float64frombits(atomic.LoadUint64(&m.snapshot.rate1)) +} + +// Rate5 returns the five-minute moving average rate of events per second. +func (m *StandardMeter) Rate5() float64 { + return math.Float64frombits(atomic.LoadUint64(&m.snapshot.rate5)) +} + +// Rate15 returns the fifteen-minute moving average rate of events per second. +func (m *StandardMeter) Rate15() float64 { + return math.Float64frombits(atomic.LoadUint64(&m.snapshot.rate15)) +} + +// RateMean returns the meter's mean rate of events per second. +func (m *StandardMeter) RateMean() float64 { + return math.Float64frombits(atomic.LoadUint64(&m.snapshot.rateMean)) +} + +// Snapshot returns a read-only copy of the meter. 
+func (m *StandardMeter) Snapshot() Meter { + copiedSnapshot := MeterSnapshot{ + count: atomic.LoadInt64(&m.snapshot.count), + rate1: atomic.LoadUint64(&m.snapshot.rate1), + rate5: atomic.LoadUint64(&m.snapshot.rate5), + rate15: atomic.LoadUint64(&m.snapshot.rate15), + rateMean: atomic.LoadUint64(&m.snapshot.rateMean), + } + return &copiedSnapshot +} + +func (m *StandardMeter) updateSnapshot() { + rate1 := math.Float64bits(m.a1.Rate()) + rate5 := math.Float64bits(m.a5.Rate()) + rate15 := math.Float64bits(m.a15.Rate()) + rateMean := math.Float64bits(float64(m.Count()) / time.Since(m.startTime).Seconds()) + + atomic.StoreUint64(&m.snapshot.rate1, rate1) + atomic.StoreUint64(&m.snapshot.rate5, rate5) + atomic.StoreUint64(&m.snapshot.rate15, rate15) + atomic.StoreUint64(&m.snapshot.rateMean, rateMean) +} + +func (m *StandardMeter) tick() { + m.a1.Tick() + m.a5.Tick() + m.a15.Tick() + m.updateSnapshot() +} + +// meterArbiter ticks meters every 5s from a single goroutine. +// meters are references in a set for future stopping. +type meterArbiter struct { + sync.RWMutex + started bool + meters map[*StandardMeter]struct{} + ticker *time.Ticker +} + +var arbiter = meterArbiter{ticker: time.NewTicker(5e9), meters: make(map[*StandardMeter]struct{})} + +// Ticks meters on the scheduled interval +func (ma *meterArbiter) tick() { + for { + select { + case <-ma.ticker.C: + ma.tickMeters() + } + } +} + +func (ma *meterArbiter) tickMeters() { + ma.RLock() + defer ma.RUnlock() + for meter := range ma.meters { + meter.tick() + } +} diff --git a/vendor/github.com/rcrowley/go-metrics/metrics.go b/vendor/github.com/rcrowley/go-metrics/metrics.go new file mode 100644 index 00000000000..b97a49ed123 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/metrics.go @@ -0,0 +1,13 @@ +// Go port of Coda Hale's Metrics library +// +// +// +// Coda Hale's original work: +package metrics + +// UseNilMetrics is checked by the constructor functions for all of the +// standard metrics. 
If it is true, the metric returned is a stub. +// +// This global kill-switch helps quantify the observer effect and makes +// for less cluttered pprof profiles. +var UseNilMetrics bool = false diff --git a/vendor/github.com/rcrowley/go-metrics/opentsdb.go b/vendor/github.com/rcrowley/go-metrics/opentsdb.go new file mode 100644 index 00000000000..266b6c93d21 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/opentsdb.go @@ -0,0 +1,119 @@ +package metrics + +import ( + "bufio" + "fmt" + "log" + "net" + "os" + "strings" + "time" +) + +var shortHostName string = "" + +// OpenTSDBConfig provides a container with configuration parameters for +// the OpenTSDB exporter +type OpenTSDBConfig struct { + Addr *net.TCPAddr // Network address to connect to + Registry Registry // Registry to be exported + FlushInterval time.Duration // Flush interval + DurationUnit time.Duration // Time conversion unit for durations + Prefix string // Prefix to be prepended to metric names +} + +// OpenTSDB is a blocking exporter function which reports metrics in r +// to a TSDB server located at addr, flushing them every d duration +// and prepending metric names with prefix. +func OpenTSDB(r Registry, d time.Duration, prefix string, addr *net.TCPAddr) { + OpenTSDBWithConfig(OpenTSDBConfig{ + Addr: addr, + Registry: r, + FlushInterval: d, + DurationUnit: time.Nanosecond, + Prefix: prefix, + }) +} + +// OpenTSDBWithConfig is a blocking exporter function just like OpenTSDB, +// but it takes a OpenTSDBConfig instead. 
+func OpenTSDBWithConfig(c OpenTSDBConfig) { + for _ = range time.Tick(c.FlushInterval) { + if err := openTSDB(&c); nil != err { + log.Println(err) + } + } +} + +func getShortHostname() string { + if shortHostName == "" { + host, _ := os.Hostname() + if index := strings.Index(host, "."); index > 0 { + shortHostName = host[:index] + } else { + shortHostName = host + } + } + return shortHostName +} + +func openTSDB(c *OpenTSDBConfig) error { + shortHostname := getShortHostname() + now := time.Now().Unix() + du := float64(c.DurationUnit) + conn, err := net.DialTCP("tcp", nil, c.Addr) + if nil != err { + return err + } + defer conn.Close() + w := bufio.NewWriter(conn) + c.Registry.Each(func(name string, i interface{}) { + switch metric := i.(type) { + case Counter: + fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, metric.Count(), shortHostname) + case Gauge: + fmt.Fprintf(w, "put %s.%s.value %d %d host=%s\n", c.Prefix, name, now, metric.Value(), shortHostname) + case GaugeFloat64: + fmt.Fprintf(w, "put %s.%s.value %d %f host=%s\n", c.Prefix, name, now, metric.Value(), shortHostname) + case Histogram: + h := metric.Snapshot() + ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) + fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, h.Count(), shortHostname) + fmt.Fprintf(w, "put %s.%s.min %d %d host=%s\n", c.Prefix, name, now, h.Min(), shortHostname) + fmt.Fprintf(w, "put %s.%s.max %d %d host=%s\n", c.Prefix, name, now, h.Max(), shortHostname) + fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, h.Mean(), shortHostname) + fmt.Fprintf(w, "put %s.%s.std-dev %d %.2f host=%s\n", c.Prefix, name, now, h.StdDev(), shortHostname) + fmt.Fprintf(w, "put %s.%s.50-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[0], shortHostname) + fmt.Fprintf(w, "put %s.%s.75-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[1], shortHostname) + fmt.Fprintf(w, "put %s.%s.95-percentile %d %.2f host=%s\n", c.Prefix, 
name, now, ps[2], shortHostname) + fmt.Fprintf(w, "put %s.%s.99-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[3], shortHostname) + fmt.Fprintf(w, "put %s.%s.999-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[4], shortHostname) + case Meter: + m := metric.Snapshot() + fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, m.Count(), shortHostname) + fmt.Fprintf(w, "put %s.%s.one-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate1(), shortHostname) + fmt.Fprintf(w, "put %s.%s.five-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate5(), shortHostname) + fmt.Fprintf(w, "put %s.%s.fifteen-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate15(), shortHostname) + fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, m.RateMean(), shortHostname) + case Timer: + t := metric.Snapshot() + ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) + fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, t.Count(), shortHostname) + fmt.Fprintf(w, "put %s.%s.min %d %d host=%s\n", c.Prefix, name, now, t.Min()/int64(du), shortHostname) + fmt.Fprintf(w, "put %s.%s.max %d %d host=%s\n", c.Prefix, name, now, t.Max()/int64(du), shortHostname) + fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, t.Mean()/du, shortHostname) + fmt.Fprintf(w, "put %s.%s.std-dev %d %.2f host=%s\n", c.Prefix, name, now, t.StdDev()/du, shortHostname) + fmt.Fprintf(w, "put %s.%s.50-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[0]/du, shortHostname) + fmt.Fprintf(w, "put %s.%s.75-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[1]/du, shortHostname) + fmt.Fprintf(w, "put %s.%s.95-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[2]/du, shortHostname) + fmt.Fprintf(w, "put %s.%s.99-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[3]/du, shortHostname) + fmt.Fprintf(w, "put %s.%s.999-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[4]/du, shortHostname) + fmt.Fprintf(w, "put 
%s.%s.one-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate1(), shortHostname) + fmt.Fprintf(w, "put %s.%s.five-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate5(), shortHostname) + fmt.Fprintf(w, "put %s.%s.fifteen-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate15(), shortHostname) + fmt.Fprintf(w, "put %s.%s.mean-rate %d %.2f host=%s\n", c.Prefix, name, now, t.RateMean(), shortHostname) + } + w.Flush() + }) + return nil +} diff --git a/vendor/github.com/rcrowley/go-metrics/registry.go b/vendor/github.com/rcrowley/go-metrics/registry.go new file mode 100644 index 00000000000..b3bab64e15b --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/registry.go @@ -0,0 +1,363 @@ +package metrics + +import ( + "fmt" + "reflect" + "strings" + "sync" +) + +// DuplicateMetric is the error returned by Registry.Register when a metric +// already exists. If you mean to Register that metric you must first +// Unregister the existing metric. +type DuplicateMetric string + +func (err DuplicateMetric) Error() string { + return fmt.Sprintf("duplicate metric: %s", string(err)) +} + +// A Registry holds references to a set of metrics by name and can iterate +// over them, calling callback functions provided by the user. +// +// This is an interface so as to encourage other structs to implement +// the Registry API as appropriate. +type Registry interface { + + // Call the given function for each registered metric. + Each(func(string, interface{})) + + // Get the metric by the given name or nil if none is registered. + Get(string) interface{} + + // GetAll metrics in the Registry. + GetAll() map[string]map[string]interface{} + + // Gets an existing metric or registers the given one. + // The interface can be the metric to register if not found in registry, + // or a function returning the metric for lazy instantiation. + GetOrRegister(string, interface{}) interface{} + + // Register the given metric under the given name. 
+ Register(string, interface{}) error + + // Run all registered healthchecks. + RunHealthchecks() + + // Unregister the metric with the given name. + Unregister(string) + + // Unregister all metrics. (Mostly for testing.) + UnregisterAll() +} + +// The standard implementation of a Registry is a mutex-protected map +// of names to metrics. +type StandardRegistry struct { + metrics map[string]interface{} + mutex sync.RWMutex +} + +// Create a new registry. +func NewRegistry() Registry { + return &StandardRegistry{metrics: make(map[string]interface{})} +} + +// Call the given function for each registered metric. +func (r *StandardRegistry) Each(f func(string, interface{})) { + for name, i := range r.registered() { + f(name, i) + } +} + +// Get the metric by the given name or nil if none is registered. +func (r *StandardRegistry) Get(name string) interface{} { + r.mutex.RLock() + defer r.mutex.RUnlock() + return r.metrics[name] +} + +// Gets an existing metric or creates and registers a new one. Threadsafe +// alternative to calling Get and Register on failure. +// The interface can be the metric to register if not found in registry, +// or a function returning the metric for lazy instantiation. +func (r *StandardRegistry) GetOrRegister(name string, i interface{}) interface{} { + // access the read lock first which should be re-entrant + r.mutex.RLock() + metric, ok := r.metrics[name] + r.mutex.RUnlock() + if ok { + return metric + } + + // only take the write lock if we'll be modifying the metrics map + r.mutex.Lock() + defer r.mutex.Unlock() + if metric, ok := r.metrics[name]; ok { + return metric + } + if v := reflect.ValueOf(i); v.Kind() == reflect.Func { + i = v.Call(nil)[0].Interface() + } + r.register(name, i) + return i +} + +// Register the given metric under the given name. Returns a DuplicateMetric +// if a metric by the given name is already registered. 
+func (r *StandardRegistry) Register(name string, i interface{}) error { + r.mutex.Lock() + defer r.mutex.Unlock() + return r.register(name, i) +} + +// Run all registered healthchecks. +func (r *StandardRegistry) RunHealthchecks() { + r.mutex.RLock() + defer r.mutex.RUnlock() + for _, i := range r.metrics { + if h, ok := i.(Healthcheck); ok { + h.Check() + } + } +} + +// GetAll metrics in the Registry +func (r *StandardRegistry) GetAll() map[string]map[string]interface{} { + data := make(map[string]map[string]interface{}) + r.Each(func(name string, i interface{}) { + values := make(map[string]interface{}) + switch metric := i.(type) { + case Counter: + values["count"] = metric.Count() + case Gauge: + values["value"] = metric.Value() + case GaugeFloat64: + values["value"] = metric.Value() + case Healthcheck: + values["error"] = nil + metric.Check() + if err := metric.Error(); nil != err { + values["error"] = metric.Error().Error() + } + case Histogram: + h := metric.Snapshot() + ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) + values["count"] = h.Count() + values["min"] = h.Min() + values["max"] = h.Max() + values["mean"] = h.Mean() + values["stddev"] = h.StdDev() + values["median"] = ps[0] + values["75%"] = ps[1] + values["95%"] = ps[2] + values["99%"] = ps[3] + values["99.9%"] = ps[4] + case Meter: + m := metric.Snapshot() + values["count"] = m.Count() + values["1m.rate"] = m.Rate1() + values["5m.rate"] = m.Rate5() + values["15m.rate"] = m.Rate15() + values["mean.rate"] = m.RateMean() + case Timer: + t := metric.Snapshot() + ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) + values["count"] = t.Count() + values["min"] = t.Min() + values["max"] = t.Max() + values["mean"] = t.Mean() + values["stddev"] = t.StdDev() + values["median"] = ps[0] + values["75%"] = ps[1] + values["95%"] = ps[2] + values["99%"] = ps[3] + values["99.9%"] = ps[4] + values["1m.rate"] = t.Rate1() + values["5m.rate"] = t.Rate5() + values["15m.rate"] = t.Rate15() + 
values["mean.rate"] = t.RateMean() + } + data[name] = values + }) + return data +} + +// Unregister the metric with the given name. +func (r *StandardRegistry) Unregister(name string) { + r.mutex.Lock() + defer r.mutex.Unlock() + r.stop(name) + delete(r.metrics, name) +} + +// Unregister all metrics. (Mostly for testing.) +func (r *StandardRegistry) UnregisterAll() { + r.mutex.Lock() + defer r.mutex.Unlock() + for name, _ := range r.metrics { + r.stop(name) + delete(r.metrics, name) + } +} + +func (r *StandardRegistry) register(name string, i interface{}) error { + if _, ok := r.metrics[name]; ok { + return DuplicateMetric(name) + } + switch i.(type) { + case Counter, Gauge, GaugeFloat64, Healthcheck, Histogram, Meter, Timer: + r.metrics[name] = i + } + return nil +} + +func (r *StandardRegistry) registered() map[string]interface{} { + r.mutex.Lock() + defer r.mutex.Unlock() + metrics := make(map[string]interface{}, len(r.metrics)) + for name, i := range r.metrics { + metrics[name] = i + } + return metrics +} + +func (r *StandardRegistry) stop(name string) { + if i, ok := r.metrics[name]; ok { + if s, ok := i.(Stoppable); ok { + s.Stop() + } + } +} + +// Stoppable defines the metrics which has to be stopped. +type Stoppable interface { + Stop() +} + +type PrefixedRegistry struct { + underlying Registry + prefix string +} + +func NewPrefixedRegistry(prefix string) Registry { + return &PrefixedRegistry{ + underlying: NewRegistry(), + prefix: prefix, + } +} + +func NewPrefixedChildRegistry(parent Registry, prefix string) Registry { + return &PrefixedRegistry{ + underlying: parent, + prefix: prefix, + } +} + +// Call the given function for each registered metric. 
+func (r *PrefixedRegistry) Each(fn func(string, interface{})) { + wrappedFn := func(prefix string) func(string, interface{}) { + return func(name string, iface interface{}) { + if strings.HasPrefix(name, prefix) { + fn(name, iface) + } else { + return + } + } + } + + baseRegistry, prefix := findPrefix(r, "") + baseRegistry.Each(wrappedFn(prefix)) +} + +func findPrefix(registry Registry, prefix string) (Registry, string) { + switch r := registry.(type) { + case *PrefixedRegistry: + return findPrefix(r.underlying, r.prefix+prefix) + case *StandardRegistry: + return r, prefix + } + return nil, "" +} + +// Get the metric by the given name or nil if none is registered. +func (r *PrefixedRegistry) Get(name string) interface{} { + realName := r.prefix + name + return r.underlying.Get(realName) +} + +// Gets an existing metric or registers the given one. +// The interface can be the metric to register if not found in registry, +// or a function returning the metric for lazy instantiation. +func (r *PrefixedRegistry) GetOrRegister(name string, metric interface{}) interface{} { + realName := r.prefix + name + return r.underlying.GetOrRegister(realName, metric) +} + +// Register the given metric under the given name. The name will be prefixed. +func (r *PrefixedRegistry) Register(name string, metric interface{}) error { + realName := r.prefix + name + return r.underlying.Register(realName, metric) +} + +// Run all registered healthchecks. +func (r *PrefixedRegistry) RunHealthchecks() { + r.underlying.RunHealthchecks() +} + +// GetAll metrics in the Registry +func (r *PrefixedRegistry) GetAll() map[string]map[string]interface{} { + return r.underlying.GetAll() +} + +// Unregister the metric with the given name. The name will be prefixed. +func (r *PrefixedRegistry) Unregister(name string) { + realName := r.prefix + name + r.underlying.Unregister(realName) +} + +// Unregister all metrics. (Mostly for testing.) 
+func (r *PrefixedRegistry) UnregisterAll() { + r.underlying.UnregisterAll() +} + +var DefaultRegistry Registry = NewRegistry() + +// Call the given function for each registered metric. +func Each(f func(string, interface{})) { + DefaultRegistry.Each(f) +} + +// Get the metric by the given name or nil if none is registered. +func Get(name string) interface{} { + return DefaultRegistry.Get(name) +} + +// Gets an existing metric or creates and registers a new one. Threadsafe +// alternative to calling Get and Register on failure. +func GetOrRegister(name string, i interface{}) interface{} { + return DefaultRegistry.GetOrRegister(name, i) +} + +// Register the given metric under the given name. Returns a DuplicateMetric +// if a metric by the given name is already registered. +func Register(name string, i interface{}) error { + return DefaultRegistry.Register(name, i) +} + +// Register the given metric under the given name. Panics if a metric by the +// given name is already registered. +func MustRegister(name string, i interface{}) { + if err := Register(name, i); err != nil { + panic(err) + } +} + +// Run all registered healthchecks. +func RunHealthchecks() { + DefaultRegistry.RunHealthchecks() +} + +// Unregister the metric with the given name. 
+func Unregister(name string) { + DefaultRegistry.Unregister(name) +} diff --git a/vendor/github.com/rcrowley/go-metrics/runtime.go b/vendor/github.com/rcrowley/go-metrics/runtime.go new file mode 100644 index 00000000000..11c6b785a0f --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/runtime.go @@ -0,0 +1,212 @@ +package metrics + +import ( + "runtime" + "runtime/pprof" + "time" +) + +var ( + memStats runtime.MemStats + runtimeMetrics struct { + MemStats struct { + Alloc Gauge + BuckHashSys Gauge + DebugGC Gauge + EnableGC Gauge + Frees Gauge + HeapAlloc Gauge + HeapIdle Gauge + HeapInuse Gauge + HeapObjects Gauge + HeapReleased Gauge + HeapSys Gauge + LastGC Gauge + Lookups Gauge + Mallocs Gauge + MCacheInuse Gauge + MCacheSys Gauge + MSpanInuse Gauge + MSpanSys Gauge + NextGC Gauge + NumGC Gauge + GCCPUFraction GaugeFloat64 + PauseNs Histogram + PauseTotalNs Gauge + StackInuse Gauge + StackSys Gauge + Sys Gauge + TotalAlloc Gauge + } + NumCgoCall Gauge + NumGoroutine Gauge + NumThread Gauge + ReadMemStats Timer + } + frees uint64 + lookups uint64 + mallocs uint64 + numGC uint32 + numCgoCalls int64 + + threadCreateProfile = pprof.Lookup("threadcreate") +) + +// Capture new values for the Go runtime statistics exported in +// runtime.MemStats. This is designed to be called as a goroutine. +func CaptureRuntimeMemStats(r Registry, d time.Duration) { + for _ = range time.Tick(d) { + CaptureRuntimeMemStatsOnce(r) + } +} + +// Capture new values for the Go runtime statistics exported in +// runtime.MemStats. This is designed to be called in a background +// goroutine. Giving a registry which has not been given to +// RegisterRuntimeMemStats will panic. +// +// Be very careful with this because runtime.ReadMemStats calls the C +// functions runtime·semacquire(&runtime·worldsema) and runtime·stoptheworld() +// and that last one does what it says on the tin. 
+func CaptureRuntimeMemStatsOnce(r Registry) { + t := time.Now() + runtime.ReadMemStats(&memStats) // This takes 50-200us. + runtimeMetrics.ReadMemStats.UpdateSince(t) + + runtimeMetrics.MemStats.Alloc.Update(int64(memStats.Alloc)) + runtimeMetrics.MemStats.BuckHashSys.Update(int64(memStats.BuckHashSys)) + if memStats.DebugGC { + runtimeMetrics.MemStats.DebugGC.Update(1) + } else { + runtimeMetrics.MemStats.DebugGC.Update(0) + } + if memStats.EnableGC { + runtimeMetrics.MemStats.EnableGC.Update(1) + } else { + runtimeMetrics.MemStats.EnableGC.Update(0) + } + + runtimeMetrics.MemStats.Frees.Update(int64(memStats.Frees - frees)) + runtimeMetrics.MemStats.HeapAlloc.Update(int64(memStats.HeapAlloc)) + runtimeMetrics.MemStats.HeapIdle.Update(int64(memStats.HeapIdle)) + runtimeMetrics.MemStats.HeapInuse.Update(int64(memStats.HeapInuse)) + runtimeMetrics.MemStats.HeapObjects.Update(int64(memStats.HeapObjects)) + runtimeMetrics.MemStats.HeapReleased.Update(int64(memStats.HeapReleased)) + runtimeMetrics.MemStats.HeapSys.Update(int64(memStats.HeapSys)) + runtimeMetrics.MemStats.LastGC.Update(int64(memStats.LastGC)) + runtimeMetrics.MemStats.Lookups.Update(int64(memStats.Lookups - lookups)) + runtimeMetrics.MemStats.Mallocs.Update(int64(memStats.Mallocs - mallocs)) + runtimeMetrics.MemStats.MCacheInuse.Update(int64(memStats.MCacheInuse)) + runtimeMetrics.MemStats.MCacheSys.Update(int64(memStats.MCacheSys)) + runtimeMetrics.MemStats.MSpanInuse.Update(int64(memStats.MSpanInuse)) + runtimeMetrics.MemStats.MSpanSys.Update(int64(memStats.MSpanSys)) + runtimeMetrics.MemStats.NextGC.Update(int64(memStats.NextGC)) + runtimeMetrics.MemStats.NumGC.Update(int64(memStats.NumGC - numGC)) + runtimeMetrics.MemStats.GCCPUFraction.Update(gcCPUFraction(&memStats)) + + // + i := numGC % uint32(len(memStats.PauseNs)) + ii := memStats.NumGC % uint32(len(memStats.PauseNs)) + if memStats.NumGC-numGC >= uint32(len(memStats.PauseNs)) { + for i = 0; i < uint32(len(memStats.PauseNs)); i++ { + 
runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i])) + } + } else { + if i > ii { + for ; i < uint32(len(memStats.PauseNs)); i++ { + runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i])) + } + i = 0 + } + for ; i < ii; i++ { + runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i])) + } + } + frees = memStats.Frees + lookups = memStats.Lookups + mallocs = memStats.Mallocs + numGC = memStats.NumGC + + runtimeMetrics.MemStats.PauseTotalNs.Update(int64(memStats.PauseTotalNs)) + runtimeMetrics.MemStats.StackInuse.Update(int64(memStats.StackInuse)) + runtimeMetrics.MemStats.StackSys.Update(int64(memStats.StackSys)) + runtimeMetrics.MemStats.Sys.Update(int64(memStats.Sys)) + runtimeMetrics.MemStats.TotalAlloc.Update(int64(memStats.TotalAlloc)) + + currentNumCgoCalls := numCgoCall() + runtimeMetrics.NumCgoCall.Update(currentNumCgoCalls - numCgoCalls) + numCgoCalls = currentNumCgoCalls + + runtimeMetrics.NumGoroutine.Update(int64(runtime.NumGoroutine())) + + runtimeMetrics.NumThread.Update(int64(threadCreateProfile.Count())) +} + +// Register runtimeMetrics for the Go runtime statistics exported in runtime and +// specifically runtime.MemStats. The runtimeMetrics are named by their +// fully-qualified Go symbols, i.e. runtime.MemStats.Alloc. 
+func RegisterRuntimeMemStats(r Registry) { + runtimeMetrics.MemStats.Alloc = NewGauge() + runtimeMetrics.MemStats.BuckHashSys = NewGauge() + runtimeMetrics.MemStats.DebugGC = NewGauge() + runtimeMetrics.MemStats.EnableGC = NewGauge() + runtimeMetrics.MemStats.Frees = NewGauge() + runtimeMetrics.MemStats.HeapAlloc = NewGauge() + runtimeMetrics.MemStats.HeapIdle = NewGauge() + runtimeMetrics.MemStats.HeapInuse = NewGauge() + runtimeMetrics.MemStats.HeapObjects = NewGauge() + runtimeMetrics.MemStats.HeapReleased = NewGauge() + runtimeMetrics.MemStats.HeapSys = NewGauge() + runtimeMetrics.MemStats.LastGC = NewGauge() + runtimeMetrics.MemStats.Lookups = NewGauge() + runtimeMetrics.MemStats.Mallocs = NewGauge() + runtimeMetrics.MemStats.MCacheInuse = NewGauge() + runtimeMetrics.MemStats.MCacheSys = NewGauge() + runtimeMetrics.MemStats.MSpanInuse = NewGauge() + runtimeMetrics.MemStats.MSpanSys = NewGauge() + runtimeMetrics.MemStats.NextGC = NewGauge() + runtimeMetrics.MemStats.NumGC = NewGauge() + runtimeMetrics.MemStats.GCCPUFraction = NewGaugeFloat64() + runtimeMetrics.MemStats.PauseNs = NewHistogram(NewExpDecaySample(1028, 0.015)) + runtimeMetrics.MemStats.PauseTotalNs = NewGauge() + runtimeMetrics.MemStats.StackInuse = NewGauge() + runtimeMetrics.MemStats.StackSys = NewGauge() + runtimeMetrics.MemStats.Sys = NewGauge() + runtimeMetrics.MemStats.TotalAlloc = NewGauge() + runtimeMetrics.NumCgoCall = NewGauge() + runtimeMetrics.NumGoroutine = NewGauge() + runtimeMetrics.NumThread = NewGauge() + runtimeMetrics.ReadMemStats = NewTimer() + + r.Register("runtime.MemStats.Alloc", runtimeMetrics.MemStats.Alloc) + r.Register("runtime.MemStats.BuckHashSys", runtimeMetrics.MemStats.BuckHashSys) + r.Register("runtime.MemStats.DebugGC", runtimeMetrics.MemStats.DebugGC) + r.Register("runtime.MemStats.EnableGC", runtimeMetrics.MemStats.EnableGC) + r.Register("runtime.MemStats.Frees", runtimeMetrics.MemStats.Frees) + r.Register("runtime.MemStats.HeapAlloc", 
runtimeMetrics.MemStats.HeapAlloc) + r.Register("runtime.MemStats.HeapIdle", runtimeMetrics.MemStats.HeapIdle) + r.Register("runtime.MemStats.HeapInuse", runtimeMetrics.MemStats.HeapInuse) + r.Register("runtime.MemStats.HeapObjects", runtimeMetrics.MemStats.HeapObjects) + r.Register("runtime.MemStats.HeapReleased", runtimeMetrics.MemStats.HeapReleased) + r.Register("runtime.MemStats.HeapSys", runtimeMetrics.MemStats.HeapSys) + r.Register("runtime.MemStats.LastGC", runtimeMetrics.MemStats.LastGC) + r.Register("runtime.MemStats.Lookups", runtimeMetrics.MemStats.Lookups) + r.Register("runtime.MemStats.Mallocs", runtimeMetrics.MemStats.Mallocs) + r.Register("runtime.MemStats.MCacheInuse", runtimeMetrics.MemStats.MCacheInuse) + r.Register("runtime.MemStats.MCacheSys", runtimeMetrics.MemStats.MCacheSys) + r.Register("runtime.MemStats.MSpanInuse", runtimeMetrics.MemStats.MSpanInuse) + r.Register("runtime.MemStats.MSpanSys", runtimeMetrics.MemStats.MSpanSys) + r.Register("runtime.MemStats.NextGC", runtimeMetrics.MemStats.NextGC) + r.Register("runtime.MemStats.NumGC", runtimeMetrics.MemStats.NumGC) + r.Register("runtime.MemStats.GCCPUFraction", runtimeMetrics.MemStats.GCCPUFraction) + r.Register("runtime.MemStats.PauseNs", runtimeMetrics.MemStats.PauseNs) + r.Register("runtime.MemStats.PauseTotalNs", runtimeMetrics.MemStats.PauseTotalNs) + r.Register("runtime.MemStats.StackInuse", runtimeMetrics.MemStats.StackInuse) + r.Register("runtime.MemStats.StackSys", runtimeMetrics.MemStats.StackSys) + r.Register("runtime.MemStats.Sys", runtimeMetrics.MemStats.Sys) + r.Register("runtime.MemStats.TotalAlloc", runtimeMetrics.MemStats.TotalAlloc) + r.Register("runtime.NumCgoCall", runtimeMetrics.NumCgoCall) + r.Register("runtime.NumGoroutine", runtimeMetrics.NumGoroutine) + r.Register("runtime.NumThread", runtimeMetrics.NumThread) + r.Register("runtime.ReadMemStats", runtimeMetrics.ReadMemStats) +} diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_cgo.go 
b/vendor/github.com/rcrowley/go-metrics/runtime_cgo.go new file mode 100644 index 00000000000..e3391f4e89f --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/runtime_cgo.go @@ -0,0 +1,10 @@ +// +build cgo +// +build !appengine + +package metrics + +import "runtime" + +func numCgoCall() int64 { + return runtime.NumCgoCall() +} diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go b/vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go new file mode 100644 index 00000000000..ca12c05bac7 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go @@ -0,0 +1,9 @@ +// +build go1.5 + +package metrics + +import "runtime" + +func gcCPUFraction(memStats *runtime.MemStats) float64 { + return memStats.GCCPUFraction +} diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go b/vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go new file mode 100644 index 00000000000..616a3b4751b --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go @@ -0,0 +1,7 @@ +// +build !cgo appengine + +package metrics + +func numCgoCall() int64 { + return 0 +} diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go b/vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go new file mode 100644 index 00000000000..be96aa6f1be --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go @@ -0,0 +1,9 @@ +// +build !go1.5 + +package metrics + +import "runtime" + +func gcCPUFraction(memStats *runtime.MemStats) float64 { + return 0 +} diff --git a/vendor/github.com/rcrowley/go-metrics/sample.go b/vendor/github.com/rcrowley/go-metrics/sample.go new file mode 100644 index 00000000000..fecee5ef68b --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/sample.go @@ -0,0 +1,616 @@ +package metrics + +import ( + "math" + "math/rand" + "sort" + "sync" + "time" +) + +const rescaleThreshold = time.Hour + +// Samples maintain a statistically-significant selection 
of values from +// a stream. +type Sample interface { + Clear() + Count() int64 + Max() int64 + Mean() float64 + Min() int64 + Percentile(float64) float64 + Percentiles([]float64) []float64 + Size() int + Snapshot() Sample + StdDev() float64 + Sum() int64 + Update(int64) + Values() []int64 + Variance() float64 +} + +// ExpDecaySample is an exponentially-decaying sample using a forward-decaying +// priority reservoir. See Cormode et al's "Forward Decay: A Practical Time +// Decay Model for Streaming Systems". +// +// +type ExpDecaySample struct { + alpha float64 + count int64 + mutex sync.Mutex + reservoirSize int + t0, t1 time.Time + values *expDecaySampleHeap +} + +// NewExpDecaySample constructs a new exponentially-decaying sample with the +// given reservoir size and alpha. +func NewExpDecaySample(reservoirSize int, alpha float64) Sample { + if UseNilMetrics { + return NilSample{} + } + s := &ExpDecaySample{ + alpha: alpha, + reservoirSize: reservoirSize, + t0: time.Now(), + values: newExpDecaySampleHeap(reservoirSize), + } + s.t1 = s.t0.Add(rescaleThreshold) + return s +} + +// Clear clears all samples. +func (s *ExpDecaySample) Clear() { + s.mutex.Lock() + defer s.mutex.Unlock() + s.count = 0 + s.t0 = time.Now() + s.t1 = s.t0.Add(rescaleThreshold) + s.values.Clear() +} + +// Count returns the number of samples recorded, which may exceed the +// reservoir size. +func (s *ExpDecaySample) Count() int64 { + s.mutex.Lock() + defer s.mutex.Unlock() + return s.count +} + +// Max returns the maximum value in the sample, which may not be the maximum +// value ever to be part of the sample. +func (s *ExpDecaySample) Max() int64 { + return SampleMax(s.Values()) +} + +// Mean returns the mean of the values in the sample. +func (s *ExpDecaySample) Mean() float64 { + return SampleMean(s.Values()) +} + +// Min returns the minimum value in the sample, which may not be the minimum +// value ever to be part of the sample. 
+func (s *ExpDecaySample) Min() int64 { + return SampleMin(s.Values()) +} + +// Percentile returns an arbitrary percentile of values in the sample. +func (s *ExpDecaySample) Percentile(p float64) float64 { + return SamplePercentile(s.Values(), p) +} + +// Percentiles returns a slice of arbitrary percentiles of values in the +// sample. +func (s *ExpDecaySample) Percentiles(ps []float64) []float64 { + return SamplePercentiles(s.Values(), ps) +} + +// Size returns the size of the sample, which is at most the reservoir size. +func (s *ExpDecaySample) Size() int { + s.mutex.Lock() + defer s.mutex.Unlock() + return s.values.Size() +} + +// Snapshot returns a read-only copy of the sample. +func (s *ExpDecaySample) Snapshot() Sample { + s.mutex.Lock() + defer s.mutex.Unlock() + vals := s.values.Values() + values := make([]int64, len(vals)) + for i, v := range vals { + values[i] = v.v + } + return &SampleSnapshot{ + count: s.count, + values: values, + } +} + +// StdDev returns the standard deviation of the values in the sample. +func (s *ExpDecaySample) StdDev() float64 { + return SampleStdDev(s.Values()) +} + +// Sum returns the sum of the values in the sample. +func (s *ExpDecaySample) Sum() int64 { + return SampleSum(s.Values()) +} + +// Update samples a new value. +func (s *ExpDecaySample) Update(v int64) { + s.update(time.Now(), v) +} + +// Values returns a copy of the values in the sample. +func (s *ExpDecaySample) Values() []int64 { + s.mutex.Lock() + defer s.mutex.Unlock() + vals := s.values.Values() + values := make([]int64, len(vals)) + for i, v := range vals { + values[i] = v.v + } + return values +} + +// Variance returns the variance of the values in the sample. +func (s *ExpDecaySample) Variance() float64 { + return SampleVariance(s.Values()) +} + +// update samples a new value at a particular timestamp. This is a method all +// its own to facilitate testing. 
+func (s *ExpDecaySample) update(t time.Time, v int64) { + s.mutex.Lock() + defer s.mutex.Unlock() + s.count++ + if s.values.Size() == s.reservoirSize { + s.values.Pop() + } + s.values.Push(expDecaySample{ + k: math.Exp(t.Sub(s.t0).Seconds()*s.alpha) / rand.Float64(), + v: v, + }) + if t.After(s.t1) { + values := s.values.Values() + t0 := s.t0 + s.values.Clear() + s.t0 = t + s.t1 = s.t0.Add(rescaleThreshold) + for _, v := range values { + v.k = v.k * math.Exp(-s.alpha*s.t0.Sub(t0).Seconds()) + s.values.Push(v) + } + } +} + +// NilSample is a no-op Sample. +type NilSample struct{} + +// Clear is a no-op. +func (NilSample) Clear() {} + +// Count is a no-op. +func (NilSample) Count() int64 { return 0 } + +// Max is a no-op. +func (NilSample) Max() int64 { return 0 } + +// Mean is a no-op. +func (NilSample) Mean() float64 { return 0.0 } + +// Min is a no-op. +func (NilSample) Min() int64 { return 0 } + +// Percentile is a no-op. +func (NilSample) Percentile(p float64) float64 { return 0.0 } + +// Percentiles is a no-op. +func (NilSample) Percentiles(ps []float64) []float64 { + return make([]float64, len(ps)) +} + +// Size is a no-op. +func (NilSample) Size() int { return 0 } + +// Sample is a no-op. +func (NilSample) Snapshot() Sample { return NilSample{} } + +// StdDev is a no-op. +func (NilSample) StdDev() float64 { return 0.0 } + +// Sum is a no-op. +func (NilSample) Sum() int64 { return 0 } + +// Update is a no-op. +func (NilSample) Update(v int64) {} + +// Values is a no-op. +func (NilSample) Values() []int64 { return []int64{} } + +// Variance is a no-op. +func (NilSample) Variance() float64 { return 0.0 } + +// SampleMax returns the maximum value of the slice of int64. +func SampleMax(values []int64) int64 { + if 0 == len(values) { + return 0 + } + var max int64 = math.MinInt64 + for _, v := range values { + if max < v { + max = v + } + } + return max +} + +// SampleMean returns the mean value of the slice of int64. 
+func SampleMean(values []int64) float64 { + if 0 == len(values) { + return 0.0 + } + return float64(SampleSum(values)) / float64(len(values)) +} + +// SampleMin returns the minimum value of the slice of int64. +func SampleMin(values []int64) int64 { + if 0 == len(values) { + return 0 + } + var min int64 = math.MaxInt64 + for _, v := range values { + if min > v { + min = v + } + } + return min +} + +// SamplePercentiles returns an arbitrary percentile of the slice of int64. +func SamplePercentile(values int64Slice, p float64) float64 { + return SamplePercentiles(values, []float64{p})[0] +} + +// SamplePercentiles returns a slice of arbitrary percentiles of the slice of +// int64. +func SamplePercentiles(values int64Slice, ps []float64) []float64 { + scores := make([]float64, len(ps)) + size := len(values) + if size > 0 { + sort.Sort(values) + for i, p := range ps { + pos := p * float64(size+1) + if pos < 1.0 { + scores[i] = float64(values[0]) + } else if pos >= float64(size) { + scores[i] = float64(values[size-1]) + } else { + lower := float64(values[int(pos)-1]) + upper := float64(values[int(pos)]) + scores[i] = lower + (pos-math.Floor(pos))*(upper-lower) + } + } + } + return scores +} + +// SampleSnapshot is a read-only copy of another Sample. +type SampleSnapshot struct { + count int64 + values []int64 +} + +func NewSampleSnapshot(count int64, values []int64) *SampleSnapshot { + return &SampleSnapshot{ + count: count, + values: values, + } +} + +// Clear panics. +func (*SampleSnapshot) Clear() { + panic("Clear called on a SampleSnapshot") +} + +// Count returns the count of inputs at the time the snapshot was taken. +func (s *SampleSnapshot) Count() int64 { return s.count } + +// Max returns the maximal value at the time the snapshot was taken. +func (s *SampleSnapshot) Max() int64 { return SampleMax(s.values) } + +// Mean returns the mean value at the time the snapshot was taken. 
+func (s *SampleSnapshot) Mean() float64 { return SampleMean(s.values) } + +// Min returns the minimal value at the time the snapshot was taken. +func (s *SampleSnapshot) Min() int64 { return SampleMin(s.values) } + +// Percentile returns an arbitrary percentile of values at the time the +// snapshot was taken. +func (s *SampleSnapshot) Percentile(p float64) float64 { + return SamplePercentile(s.values, p) +} + +// Percentiles returns a slice of arbitrary percentiles of values at the time +// the snapshot was taken. +func (s *SampleSnapshot) Percentiles(ps []float64) []float64 { + return SamplePercentiles(s.values, ps) +} + +// Size returns the size of the sample at the time the snapshot was taken. +func (s *SampleSnapshot) Size() int { return len(s.values) } + +// Snapshot returns the snapshot. +func (s *SampleSnapshot) Snapshot() Sample { return s } + +// StdDev returns the standard deviation of values at the time the snapshot was +// taken. +func (s *SampleSnapshot) StdDev() float64 { return SampleStdDev(s.values) } + +// Sum returns the sum of values at the time the snapshot was taken. +func (s *SampleSnapshot) Sum() int64 { return SampleSum(s.values) } + +// Update panics. +func (*SampleSnapshot) Update(int64) { + panic("Update called on a SampleSnapshot") +} + +// Values returns a copy of the values in the sample. +func (s *SampleSnapshot) Values() []int64 { + values := make([]int64, len(s.values)) + copy(values, s.values) + return values +} + +// Variance returns the variance of values at the time the snapshot was taken. +func (s *SampleSnapshot) Variance() float64 { return SampleVariance(s.values) } + +// SampleStdDev returns the standard deviation of the slice of int64. +func SampleStdDev(values []int64) float64 { + return math.Sqrt(SampleVariance(values)) +} + +// SampleSum returns the sum of the slice of int64. 
+func SampleSum(values []int64) int64 { + var sum int64 + for _, v := range values { + sum += v + } + return sum +} + +// SampleVariance returns the variance of the slice of int64. +func SampleVariance(values []int64) float64 { + if 0 == len(values) { + return 0.0 + } + m := SampleMean(values) + var sum float64 + for _, v := range values { + d := float64(v) - m + sum += d * d + } + return sum / float64(len(values)) +} + +// A uniform sample using Vitter's Algorithm R. +// +// +type UniformSample struct { + count int64 + mutex sync.Mutex + reservoirSize int + values []int64 +} + +// NewUniformSample constructs a new uniform sample with the given reservoir +// size. +func NewUniformSample(reservoirSize int) Sample { + if UseNilMetrics { + return NilSample{} + } + return &UniformSample{ + reservoirSize: reservoirSize, + values: make([]int64, 0, reservoirSize), + } +} + +// Clear clears all samples. +func (s *UniformSample) Clear() { + s.mutex.Lock() + defer s.mutex.Unlock() + s.count = 0 + s.values = make([]int64, 0, s.reservoirSize) +} + +// Count returns the number of samples recorded, which may exceed the +// reservoir size. +func (s *UniformSample) Count() int64 { + s.mutex.Lock() + defer s.mutex.Unlock() + return s.count +} + +// Max returns the maximum value in the sample, which may not be the maximum +// value ever to be part of the sample. +func (s *UniformSample) Max() int64 { + s.mutex.Lock() + defer s.mutex.Unlock() + return SampleMax(s.values) +} + +// Mean returns the mean of the values in the sample. +func (s *UniformSample) Mean() float64 { + s.mutex.Lock() + defer s.mutex.Unlock() + return SampleMean(s.values) +} + +// Min returns the minimum value in the sample, which may not be the minimum +// value ever to be part of the sample. +func (s *UniformSample) Min() int64 { + s.mutex.Lock() + defer s.mutex.Unlock() + return SampleMin(s.values) +} + +// Percentile returns an arbitrary percentile of values in the sample. 
+func (s *UniformSample) Percentile(p float64) float64 { + s.mutex.Lock() + defer s.mutex.Unlock() + return SamplePercentile(s.values, p) +} + +// Percentiles returns a slice of arbitrary percentiles of values in the +// sample. +func (s *UniformSample) Percentiles(ps []float64) []float64 { + s.mutex.Lock() + defer s.mutex.Unlock() + return SamplePercentiles(s.values, ps) +} + +// Size returns the size of the sample, which is at most the reservoir size. +func (s *UniformSample) Size() int { + s.mutex.Lock() + defer s.mutex.Unlock() + return len(s.values) +} + +// Snapshot returns a read-only copy of the sample. +func (s *UniformSample) Snapshot() Sample { + s.mutex.Lock() + defer s.mutex.Unlock() + values := make([]int64, len(s.values)) + copy(values, s.values) + return &SampleSnapshot{ + count: s.count, + values: values, + } +} + +// StdDev returns the standard deviation of the values in the sample. +func (s *UniformSample) StdDev() float64 { + s.mutex.Lock() + defer s.mutex.Unlock() + return SampleStdDev(s.values) +} + +// Sum returns the sum of the values in the sample. +func (s *UniformSample) Sum() int64 { + s.mutex.Lock() + defer s.mutex.Unlock() + return SampleSum(s.values) +} + +// Update samples a new value. +func (s *UniformSample) Update(v int64) { + s.mutex.Lock() + defer s.mutex.Unlock() + s.count++ + if len(s.values) < s.reservoirSize { + s.values = append(s.values, v) + } else { + r := rand.Int63n(s.count) + if r < int64(len(s.values)) { + s.values[int(r)] = v + } + } +} + +// Values returns a copy of the values in the sample. +func (s *UniformSample) Values() []int64 { + s.mutex.Lock() + defer s.mutex.Unlock() + values := make([]int64, len(s.values)) + copy(values, s.values) + return values +} + +// Variance returns the variance of the values in the sample. +func (s *UniformSample) Variance() float64 { + s.mutex.Lock() + defer s.mutex.Unlock() + return SampleVariance(s.values) +} + +// expDecaySample represents an individual sample in a heap. 
+type expDecaySample struct { + k float64 + v int64 +} + +func newExpDecaySampleHeap(reservoirSize int) *expDecaySampleHeap { + return &expDecaySampleHeap{make([]expDecaySample, 0, reservoirSize)} +} + +// expDecaySampleHeap is a min-heap of expDecaySamples. +// The internal implementation is copied from the standard library's container/heap +type expDecaySampleHeap struct { + s []expDecaySample +} + +func (h *expDecaySampleHeap) Clear() { + h.s = h.s[:0] +} + +func (h *expDecaySampleHeap) Push(s expDecaySample) { + n := len(h.s) + h.s = h.s[0 : n+1] + h.s[n] = s + h.up(n) +} + +func (h *expDecaySampleHeap) Pop() expDecaySample { + n := len(h.s) - 1 + h.s[0], h.s[n] = h.s[n], h.s[0] + h.down(0, n) + + n = len(h.s) + s := h.s[n-1] + h.s = h.s[0 : n-1] + return s +} + +func (h *expDecaySampleHeap) Size() int { + return len(h.s) +} + +func (h *expDecaySampleHeap) Values() []expDecaySample { + return h.s +} + +func (h *expDecaySampleHeap) up(j int) { + for { + i := (j - 1) / 2 // parent + if i == j || !(h.s[j].k < h.s[i].k) { + break + } + h.s[i], h.s[j] = h.s[j], h.s[i] + j = i + } +} + +func (h *expDecaySampleHeap) down(i, n int) { + for { + j1 := 2*i + 1 + if j1 >= n || j1 < 0 { // j1 < 0 after int overflow + break + } + j := j1 // left child + if j2 := j1 + 1; j2 < n && !(h.s[j1].k < h.s[j2].k) { + j = j2 // = 2*i + 2 // right child + } + if !(h.s[j].k < h.s[i].k) { + break + } + h.s[i], h.s[j] = h.s[j], h.s[i] + i = j + } +} + +type int64Slice []int64 + +func (p int64Slice) Len() int { return len(p) } +func (p int64Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/vendor/github.com/rcrowley/go-metrics/syslog.go b/vendor/github.com/rcrowley/go-metrics/syslog.go new file mode 100644 index 00000000000..693f190855c --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/syslog.go @@ -0,0 +1,78 @@ +// +build !windows + +package metrics + +import ( + "fmt" + "log/syslog" + "time" +) + +// 
Output each metric in the given registry to syslog periodically using +// the given syslogger. +func Syslog(r Registry, d time.Duration, w *syslog.Writer) { + for _ = range time.Tick(d) { + r.Each(func(name string, i interface{}) { + switch metric := i.(type) { + case Counter: + w.Info(fmt.Sprintf("counter %s: count: %d", name, metric.Count())) + case Gauge: + w.Info(fmt.Sprintf("gauge %s: value: %d", name, metric.Value())) + case GaugeFloat64: + w.Info(fmt.Sprintf("gauge %s: value: %f", name, metric.Value())) + case Healthcheck: + metric.Check() + w.Info(fmt.Sprintf("healthcheck %s: error: %v", name, metric.Error())) + case Histogram: + h := metric.Snapshot() + ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) + w.Info(fmt.Sprintf( + "histogram %s: count: %d min: %d max: %d mean: %.2f stddev: %.2f median: %.2f 75%%: %.2f 95%%: %.2f 99%%: %.2f 99.9%%: %.2f", + name, + h.Count(), + h.Min(), + h.Max(), + h.Mean(), + h.StdDev(), + ps[0], + ps[1], + ps[2], + ps[3], + ps[4], + )) + case Meter: + m := metric.Snapshot() + w.Info(fmt.Sprintf( + "meter %s: count: %d 1-min: %.2f 5-min: %.2f 15-min: %.2f mean: %.2f", + name, + m.Count(), + m.Rate1(), + m.Rate5(), + m.Rate15(), + m.RateMean(), + )) + case Timer: + t := metric.Snapshot() + ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) + w.Info(fmt.Sprintf( + "timer %s: count: %d min: %d max: %d mean: %.2f stddev: %.2f median: %.2f 75%%: %.2f 95%%: %.2f 99%%: %.2f 99.9%%: %.2f 1-min: %.2f 5-min: %.2f 15-min: %.2f mean-rate: %.2f", + name, + t.Count(), + t.Min(), + t.Max(), + t.Mean(), + t.StdDev(), + ps[0], + ps[1], + ps[2], + ps[3], + ps[4], + t.Rate1(), + t.Rate5(), + t.Rate15(), + t.RateMean(), + )) + } + }) + } +} diff --git a/vendor/github.com/rcrowley/go-metrics/timer.go b/vendor/github.com/rcrowley/go-metrics/timer.go new file mode 100644 index 00000000000..d6ec4c6260f --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/timer.go @@ -0,0 +1,329 @@ +package metrics + +import ( + "sync" + 
"time" +) + +// Timers capture the duration and rate of events. +type Timer interface { + Count() int64 + Max() int64 + Mean() float64 + Min() int64 + Percentile(float64) float64 + Percentiles([]float64) []float64 + Rate1() float64 + Rate5() float64 + Rate15() float64 + RateMean() float64 + Snapshot() Timer + StdDev() float64 + Stop() + Sum() int64 + Time(func()) + Update(time.Duration) + UpdateSince(time.Time) + Variance() float64 +} + +// GetOrRegisterTimer returns an existing Timer or constructs and registers a +// new StandardTimer. +// Be sure to unregister the meter from the registry once it is of no use to +// allow for garbage collection. +func GetOrRegisterTimer(name string, r Registry) Timer { + if nil == r { + r = DefaultRegistry + } + return r.GetOrRegister(name, NewTimer).(Timer) +} + +// NewCustomTimer constructs a new StandardTimer from a Histogram and a Meter. +// Be sure to call Stop() once the timer is of no use to allow for garbage collection. +func NewCustomTimer(h Histogram, m Meter) Timer { + if UseNilMetrics { + return NilTimer{} + } + return &StandardTimer{ + histogram: h, + meter: m, + } +} + +// NewRegisteredTimer constructs and registers a new StandardTimer. +// Be sure to unregister the meter from the registry once it is of no use to +// allow for garbage collection. +func NewRegisteredTimer(name string, r Registry) Timer { + c := NewTimer() + if nil == r { + r = DefaultRegistry + } + r.Register(name, c) + return c +} + +// NewTimer constructs a new StandardTimer using an exponentially-decaying +// sample with the same reservoir size and alpha as UNIX load averages. +// Be sure to call Stop() once the timer is of no use to allow for garbage collection. +func NewTimer() Timer { + if UseNilMetrics { + return NilTimer{} + } + return &StandardTimer{ + histogram: NewHistogram(NewExpDecaySample(1028, 0.015)), + meter: NewMeter(), + } +} + +// NilTimer is a no-op Timer. +type NilTimer struct { + h Histogram + m Meter +} + +// Count is a no-op. 
+func (NilTimer) Count() int64 { return 0 } + +// Max is a no-op. +func (NilTimer) Max() int64 { return 0 } + +// Mean is a no-op. +func (NilTimer) Mean() float64 { return 0.0 } + +// Min is a no-op. +func (NilTimer) Min() int64 { return 0 } + +// Percentile is a no-op. +func (NilTimer) Percentile(p float64) float64 { return 0.0 } + +// Percentiles is a no-op. +func (NilTimer) Percentiles(ps []float64) []float64 { + return make([]float64, len(ps)) +} + +// Rate1 is a no-op. +func (NilTimer) Rate1() float64 { return 0.0 } + +// Rate5 is a no-op. +func (NilTimer) Rate5() float64 { return 0.0 } + +// Rate15 is a no-op. +func (NilTimer) Rate15() float64 { return 0.0 } + +// RateMean is a no-op. +func (NilTimer) RateMean() float64 { return 0.0 } + +// Snapshot is a no-op. +func (NilTimer) Snapshot() Timer { return NilTimer{} } + +// StdDev is a no-op. +func (NilTimer) StdDev() float64 { return 0.0 } + +// Stop is a no-op. +func (NilTimer) Stop() {} + +// Sum is a no-op. +func (NilTimer) Sum() int64 { return 0 } + +// Time is a no-op. +func (NilTimer) Time(func()) {} + +// Update is a no-op. +func (NilTimer) Update(time.Duration) {} + +// UpdateSince is a no-op. +func (NilTimer) UpdateSince(time.Time) {} + +// Variance is a no-op. +func (NilTimer) Variance() float64 { return 0.0 } + +// StandardTimer is the standard implementation of a Timer and uses a Histogram +// and Meter. +type StandardTimer struct { + histogram Histogram + meter Meter + mutex sync.Mutex +} + +// Count returns the number of events recorded. +func (t *StandardTimer) Count() int64 { + return t.histogram.Count() +} + +// Max returns the maximum value in the sample. +func (t *StandardTimer) Max() int64 { + return t.histogram.Max() +} + +// Mean returns the mean of the values in the sample. +func (t *StandardTimer) Mean() float64 { + return t.histogram.Mean() +} + +// Min returns the minimum value in the sample. 
+func (t *StandardTimer) Min() int64 { + return t.histogram.Min() +} + +// Percentile returns an arbitrary percentile of the values in the sample. +func (t *StandardTimer) Percentile(p float64) float64 { + return t.histogram.Percentile(p) +} + +// Percentiles returns a slice of arbitrary percentiles of the values in the +// sample. +func (t *StandardTimer) Percentiles(ps []float64) []float64 { + return t.histogram.Percentiles(ps) +} + +// Rate1 returns the one-minute moving average rate of events per second. +func (t *StandardTimer) Rate1() float64 { + return t.meter.Rate1() +} + +// Rate5 returns the five-minute moving average rate of events per second. +func (t *StandardTimer) Rate5() float64 { + return t.meter.Rate5() +} + +// Rate15 returns the fifteen-minute moving average rate of events per second. +func (t *StandardTimer) Rate15() float64 { + return t.meter.Rate15() +} + +// RateMean returns the meter's mean rate of events per second. +func (t *StandardTimer) RateMean() float64 { + return t.meter.RateMean() +} + +// Snapshot returns a read-only copy of the timer. +func (t *StandardTimer) Snapshot() Timer { + t.mutex.Lock() + defer t.mutex.Unlock() + return &TimerSnapshot{ + histogram: t.histogram.Snapshot().(*HistogramSnapshot), + meter: t.meter.Snapshot().(*MeterSnapshot), + } +} + +// StdDev returns the standard deviation of the values in the sample. +func (t *StandardTimer) StdDev() float64 { + return t.histogram.StdDev() +} + +// Stop stops the meter. +func (t *StandardTimer) Stop() { + t.meter.Stop() +} + +// Sum returns the sum in the sample. +func (t *StandardTimer) Sum() int64 { + return t.histogram.Sum() +} + +// Record the duration of the execution of the given function. +func (t *StandardTimer) Time(f func()) { + ts := time.Now() + f() + t.Update(time.Since(ts)) +} + +// Record the duration of an event. 
+func (t *StandardTimer) Update(d time.Duration) { + t.mutex.Lock() + defer t.mutex.Unlock() + t.histogram.Update(int64(d)) + t.meter.Mark(1) +} + +// Record the duration of an event that started at a time and ends now. +func (t *StandardTimer) UpdateSince(ts time.Time) { + t.mutex.Lock() + defer t.mutex.Unlock() + t.histogram.Update(int64(time.Since(ts))) + t.meter.Mark(1) +} + +// Variance returns the variance of the values in the sample. +func (t *StandardTimer) Variance() float64 { + return t.histogram.Variance() +} + +// TimerSnapshot is a read-only copy of another Timer. +type TimerSnapshot struct { + histogram *HistogramSnapshot + meter *MeterSnapshot +} + +// Count returns the number of events recorded at the time the snapshot was +// taken. +func (t *TimerSnapshot) Count() int64 { return t.histogram.Count() } + +// Max returns the maximum value at the time the snapshot was taken. +func (t *TimerSnapshot) Max() int64 { return t.histogram.Max() } + +// Mean returns the mean value at the time the snapshot was taken. +func (t *TimerSnapshot) Mean() float64 { return t.histogram.Mean() } + +// Min returns the minimum value at the time the snapshot was taken. +func (t *TimerSnapshot) Min() int64 { return t.histogram.Min() } + +// Percentile returns an arbitrary percentile of sampled values at the time the +// snapshot was taken. +func (t *TimerSnapshot) Percentile(p float64) float64 { + return t.histogram.Percentile(p) +} + +// Percentiles returns a slice of arbitrary percentiles of sampled values at +// the time the snapshot was taken. +func (t *TimerSnapshot) Percentiles(ps []float64) []float64 { + return t.histogram.Percentiles(ps) +} + +// Rate1 returns the one-minute moving average rate of events per second at the +// time the snapshot was taken. +func (t *TimerSnapshot) Rate1() float64 { return t.meter.Rate1() } + +// Rate5 returns the five-minute moving average rate of events per second at +// the time the snapshot was taken. 
+func (t *TimerSnapshot) Rate5() float64 { return t.meter.Rate5() } + +// Rate15 returns the fifteen-minute moving average rate of events per second +// at the time the snapshot was taken. +func (t *TimerSnapshot) Rate15() float64 { return t.meter.Rate15() } + +// RateMean returns the meter's mean rate of events per second at the time the +// snapshot was taken. +func (t *TimerSnapshot) RateMean() float64 { return t.meter.RateMean() } + +// Snapshot returns the snapshot. +func (t *TimerSnapshot) Snapshot() Timer { return t } + +// StdDev returns the standard deviation of the values at the time the snapshot +// was taken. +func (t *TimerSnapshot) StdDev() float64 { return t.histogram.StdDev() } + +// Stop is a no-op. +func (t *TimerSnapshot) Stop() {} + +// Sum returns the sum at the time the snapshot was taken. +func (t *TimerSnapshot) Sum() int64 { return t.histogram.Sum() } + +// Time panics. +func (*TimerSnapshot) Time(func()) { + panic("Time called on a TimerSnapshot") +} + +// Update panics. +func (*TimerSnapshot) Update(time.Duration) { + panic("Update called on a TimerSnapshot") +} + +// UpdateSince panics. +func (*TimerSnapshot) UpdateSince(time.Time) { + panic("UpdateSince called on a TimerSnapshot") +} + +// Variance returns the variance of the values at the time the snapshot was +// taken. +func (t *TimerSnapshot) Variance() float64 { return t.histogram.Variance() } diff --git a/vendor/github.com/rcrowley/go-metrics/writer.go b/vendor/github.com/rcrowley/go-metrics/writer.go new file mode 100644 index 00000000000..091e971d2e6 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/writer.go @@ -0,0 +1,100 @@ +package metrics + +import ( + "fmt" + "io" + "sort" + "time" +) + +// Write sorts writes each metric in the given registry periodically to the +// given io.Writer. 
+func Write(r Registry, d time.Duration, w io.Writer) { + for _ = range time.Tick(d) { + WriteOnce(r, w) + } +} + +// WriteOnce sorts and writes metrics in the given registry to the given +// io.Writer. +func WriteOnce(r Registry, w io.Writer) { + var namedMetrics namedMetricSlice + r.Each(func(name string, i interface{}) { + namedMetrics = append(namedMetrics, namedMetric{name, i}) + }) + + sort.Sort(namedMetrics) + for _, namedMetric := range namedMetrics { + switch metric := namedMetric.m.(type) { + case Counter: + fmt.Fprintf(w, "counter %s\n", namedMetric.name) + fmt.Fprintf(w, " count: %9d\n", metric.Count()) + case Gauge: + fmt.Fprintf(w, "gauge %s\n", namedMetric.name) + fmt.Fprintf(w, " value: %9d\n", metric.Value()) + case GaugeFloat64: + fmt.Fprintf(w, "gauge %s\n", namedMetric.name) + fmt.Fprintf(w, " value: %f\n", metric.Value()) + case Healthcheck: + metric.Check() + fmt.Fprintf(w, "healthcheck %s\n", namedMetric.name) + fmt.Fprintf(w, " error: %v\n", metric.Error()) + case Histogram: + h := metric.Snapshot() + ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) + fmt.Fprintf(w, "histogram %s\n", namedMetric.name) + fmt.Fprintf(w, " count: %9d\n", h.Count()) + fmt.Fprintf(w, " min: %9d\n", h.Min()) + fmt.Fprintf(w, " max: %9d\n", h.Max()) + fmt.Fprintf(w, " mean: %12.2f\n", h.Mean()) + fmt.Fprintf(w, " stddev: %12.2f\n", h.StdDev()) + fmt.Fprintf(w, " median: %12.2f\n", ps[0]) + fmt.Fprintf(w, " 75%%: %12.2f\n", ps[1]) + fmt.Fprintf(w, " 95%%: %12.2f\n", ps[2]) + fmt.Fprintf(w, " 99%%: %12.2f\n", ps[3]) + fmt.Fprintf(w, " 99.9%%: %12.2f\n", ps[4]) + case Meter: + m := metric.Snapshot() + fmt.Fprintf(w, "meter %s\n", namedMetric.name) + fmt.Fprintf(w, " count: %9d\n", m.Count()) + fmt.Fprintf(w, " 1-min rate: %12.2f\n", m.Rate1()) + fmt.Fprintf(w, " 5-min rate: %12.2f\n", m.Rate5()) + fmt.Fprintf(w, " 15-min rate: %12.2f\n", m.Rate15()) + fmt.Fprintf(w, " mean rate: %12.2f\n", m.RateMean()) + case Timer: + t := metric.Snapshot() + ps := 
t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) + fmt.Fprintf(w, "timer %s\n", namedMetric.name) + fmt.Fprintf(w, " count: %9d\n", t.Count()) + fmt.Fprintf(w, " min: %9d\n", t.Min()) + fmt.Fprintf(w, " max: %9d\n", t.Max()) + fmt.Fprintf(w, " mean: %12.2f\n", t.Mean()) + fmt.Fprintf(w, " stddev: %12.2f\n", t.StdDev()) + fmt.Fprintf(w, " median: %12.2f\n", ps[0]) + fmt.Fprintf(w, " 75%%: %12.2f\n", ps[1]) + fmt.Fprintf(w, " 95%%: %12.2f\n", ps[2]) + fmt.Fprintf(w, " 99%%: %12.2f\n", ps[3]) + fmt.Fprintf(w, " 99.9%%: %12.2f\n", ps[4]) + fmt.Fprintf(w, " 1-min rate: %12.2f\n", t.Rate1()) + fmt.Fprintf(w, " 5-min rate: %12.2f\n", t.Rate5()) + fmt.Fprintf(w, " 15-min rate: %12.2f\n", t.Rate15()) + fmt.Fprintf(w, " mean rate: %12.2f\n", t.RateMean()) + } + } +} + +type namedMetric struct { + name string + m interface{} +} + +// namedMetricSlice is a slice of namedMetrics that implements sort.Interface. +type namedMetricSlice []namedMetric + +func (nms namedMetricSlice) Len() int { return len(nms) } + +func (nms namedMetricSlice) Swap(i, j int) { nms[i], nms[j] = nms[j], nms[i] } + +func (nms namedMetricSlice) Less(i, j int) bool { + return nms[i].name < nms[j].name +} From e9dad0d9906f88115e32e0439eb2d073ec83f01d Mon Sep 17 00:00:00 2001 From: Sabari Kumar Murugesan Date: Thu, 8 Nov 2018 00:42:42 -0800 Subject: [PATCH 19/20] Address PR feedback --- .../kafka/controller/channel/reconcile.go | 58 +++++++++++-------- .../controller/channel/reconcile_test.go | 11 ++-- .../kafka/controller/reconcile.go | 42 +++----------- pkg/provisioners/kafka/main.go | 1 + 4 files changed, 49 insertions(+), 63 deletions(-) diff --git a/pkg/provisioners/kafka/controller/channel/reconcile.go b/pkg/provisioners/kafka/controller/channel/reconcile.go index 118de57b998..dc5248dfcdf 100644 --- a/pkg/provisioners/kafka/controller/channel/reconcile.go +++ b/pkg/provisioners/kafka/controller/channel/reconcile.go @@ -37,10 +37,13 @@ import ( const ( finalizerName = controllerAgentName - 
ArgumentNumPartitions = "NumPartitions" - DefaultNumPartitions = 1 + DefaultNumPartitions = 1 ) +type channelArgs struct { + NumPartitions int32 `json:"NumPartitions,omitempty"` +} + // Reconcile compares the actual state with the desired, and attempts to // converge the two. It then updates the Status block of the Channel resource // with the current status of the resource. @@ -50,11 +53,14 @@ func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, err channel := &v1alpha1.Channel{} err := r.client.Get(context.TODO(), request.NamespacedName, channel) + // The Channel may have been deleted since it was added to the workqueue. If so, there is + // nothing to be done since the dependent resources would have been deleted as well. if errors.IsNotFound(err) { r.logger.Info("could not find channel", zap.Any("request", request)) return reconcile.Result{}, nil } + // Any other error should be retried in another reconciliation. if err != nil { r.logger.Error("could not fetch channel", zap.Error(err)) return reconcile.Result{}, err @@ -88,9 +94,9 @@ func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, err err = fmt.Errorf("ClusterChannelProvisioner %s is not ready", clusterChannelProvisioner.Name) } - if err := r.updateChannel(ctx, newChannel); err != nil { - r.logger.Info("failed to update channel status", zap.Error(err)) - return reconcile.Result{}, err + if updateChannelErr := r.updateChannel(ctx, newChannel); updateChannelErr != nil { + r.logger.Info("failed to update channel status", zap.Error(updateChannelErr)) + return reconcile.Result{}, updateChannelErr } // Requeue if the resource is not ready: @@ -98,15 +104,14 @@ func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, err } func (r *reconciler) reconcile(channel *v1alpha1.Channel) error { - // See if the channel has been deleted - accessor, err := meta.Accessor(channel) - if err != nil { - r.logger.Info("failed to get metadata", zap.Error(err)) - 
return err - } + // We don't currently initialize r.kafkaClusterAdmin, hence we end up creating the cluster admin client every time. + // This is because of an issue with Shopify/sarama. See https://github.com/Shopify/sarama/issues/1162. + // Once the issue is fixed we should use a shared cluster admin client. Also, r.kafkaClusterAdmin is currently + // used to pass a fake admin client in the tests. kafkaClusterAdmin := r.kafkaClusterAdmin if kafkaClusterAdmin == nil { + var err error kafkaClusterAdmin, err = createKafkaAdminClient(r.config) if err != nil { r.logger.Fatal("unable to build kafka admin client", zap.Error(err)) @@ -114,6 +119,12 @@ func (r *reconciler) reconcile(channel *v1alpha1.Channel) error { } } + // See if the channel has been deleted + accessor, err := meta.Accessor(channel) + if err != nil { + r.logger.Info("failed to get metadata", zap.Error(err)) + return err + } deletionTimestamp := accessor.GetDeletionTimestamp() if deletionTimestamp != nil { r.logger.Info(fmt.Sprintf("DeletionTimestamp: %v", deletionTimestamp)) @@ -142,26 +153,23 @@ func (r *reconciler) provisionChannel(channel *v1alpha1.Channel, kafkaClusterAdm topicName := topicName(channel) r.logger.Info("creating topic on kafka cluster", zap.String("topic", topicName)) - partitions := DefaultNumPartitions + var arguments channelArgs if channel.Spec.Arguments != nil { var err error - arguments, err := unmarshalArguments(channel.Spec.Arguments.Raw) + arguments, err = unmarshalArguments(channel.Spec.Arguments.Raw) if err != nil { return err } - if num, ok := arguments[ArgumentNumPartitions]; ok { - parsedNum, ok := num.(float64) - if !ok { - return fmt.Errorf("could not parse argument %s for channel %s", ArgumentNumPartitions, fmt.Sprintf("%s/%s", channel.Namespace, channel.Name)) - } - partitions = int(parsedNum) - } + } + + if arguments.NumPartitions == 0 { + arguments.NumPartitions = DefaultNumPartitions } err := kafkaClusterAdmin.CreateTopic(topicName, &sarama.TopicDetail{ 
ReplicationFactor: 1, - NumPartitions: int32(partitions), + NumPartitions: arguments.NumPartitions, }, false) if err == sarama.ErrTopicAlreadyExists { return nil @@ -246,12 +254,12 @@ func topicName(channel *v1alpha1.Channel) string { return fmt.Sprintf("%s.%s", channel.Namespace, channel.Name) } -// unmarshalArguments unmarshal's a json/yaml serialized input and returns a map structure -func unmarshalArguments(bytes []byte) (map[string]interface{}, error) { - arguments := make(map[string]interface{}) +// unmarshalArguments unmarshal's a json/yaml serialized input and returns channelArgs +func unmarshalArguments(bytes []byte) (channelArgs, error) { + var arguments channelArgs if len(bytes) > 0 { if err := json.Unmarshal(bytes, &arguments); err != nil { - return nil, fmt.Errorf("error unmarshalling arguments: %s", err) + return arguments, fmt.Errorf("error unmarshalling arguments: %s", err) } } return arguments, nil diff --git a/pkg/provisioners/kafka/controller/channel/reconcile_test.go b/pkg/provisioners/kafka/controller/channel/reconcile_test.go index 224d6b335a4..6becf1e1cbe 100644 --- a/pkg/provisioners/kafka/controller/channel/reconcile_test.go +++ b/pkg/provisioners/kafka/controller/channel/reconcile_test.go @@ -43,6 +43,7 @@ const ( channelName = "test-channel" clusterChannelProvisionerName = "kafka-channel" testNS = "test-namespace" + argumentNumPartitions = "NumPartitions" ) var ( @@ -264,8 +265,8 @@ func TestProvisionChannel(t *testing.T) { }, { name: "provision with invalid channel arguments - errors", - c: getNewChannelWithArgs(channelName, map[string]interface{}{ArgumentNumPartitions: "invalid"}), - wantError: fmt.Sprintf("could not parse argument %s for channel test-namespace/test-channel", ArgumentNumPartitions), + c: getNewChannelWithArgs(channelName, map[string]interface{}{argumentNumPartitions: "invalid"}), + wantError: fmt.Sprintf("error unmarshalling arguments: json: cannot unmarshal string into Go struct field channelArgs.%s of type int32", 
argumentNumPartitions), }, { name: "provision with unmarshallable channel arguments - errors", @@ -280,7 +281,7 @@ func TestProvisionChannel(t *testing.T) { }, { name: "provision with valid channel arguments", - c: getNewChannelWithArgs(channelName, map[string]interface{}{ArgumentNumPartitions: 2}), + c: getNewChannelWithArgs(channelName, map[string]interface{}{argumentNumPartitions: 2}), wantTopicName: fmt.Sprintf("%s.%s", testNS, channelName), wantTopicDetail: &sarama.TopicDetail{ ReplicationFactor: 1, @@ -289,7 +290,7 @@ func TestProvisionChannel(t *testing.T) { }, { name: "provision but topic already exists - no error", - c: getNewChannelWithArgs(channelName, map[string]interface{}{ArgumentNumPartitions: 2}), + c: getNewChannelWithArgs(channelName, map[string]interface{}{argumentNumPartitions: 2}), wantTopicName: fmt.Sprintf("%s.%s", testNS, channelName), wantTopicDetail: &sarama.TopicDetail{ ReplicationFactor: 1, @@ -299,7 +300,7 @@ func TestProvisionChannel(t *testing.T) { }, { name: "provision but error creating topic", - c: getNewChannelWithArgs(channelName, map[string]interface{}{ArgumentNumPartitions: 2}), + c: getNewChannelWithArgs(channelName, map[string]interface{}{argumentNumPartitions: 2}), wantTopicName: fmt.Sprintf("%s.%s", testNS, channelName), wantTopicDetail: &sarama.TopicDetail{ ReplicationFactor: 1, diff --git a/pkg/provisioners/kafka/controller/reconcile.go b/pkg/provisioners/kafka/controller/reconcile.go index 0f72d7fc4a0..dd5d84b47a0 100644 --- a/pkg/provisioners/kafka/controller/reconcile.go +++ b/pkg/provisioners/kafka/controller/reconcile.go @@ -20,13 +20,13 @@ import ( "context" "fmt" - "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" "go.uber.org/zap" - "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" - "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" + util 
"github.com/knative/eventing/pkg/provisioners" ) const ( @@ -38,6 +38,7 @@ const ( // converge the two. It then updates the Status block of the Provisioner resource // with the current status of the resource. func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, error) { + ctx := context.TODO() r.logger.Info("reconciling ClusterChannelProvisioner", zap.Any("request", request)) provisioner := &v1alpha1.ClusterChannelProvisioner{} err := r.client.Get(context.TODO(), request.NamespacedName, provisioner) @@ -58,20 +59,14 @@ func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, err return reconcile.Result{}, nil } - original := provisioner.DeepCopy() + newProvisioner := provisioner.DeepCopy() // Reconcile this copy of the Provisioner and then write back any status // updates regardless of whether the reconcile error out. - err = r.reconcile(provisioner) - if !equality.Semantic.DeepEqual(original.Status, provisioner.Status) { - // If we didn't change anything then don't call updateStatus. - // This is important because the copy we loaded from the informer's - // cache may be stale and we don't want to overwrite a prior update - // to status with this stale state. 
- if _, err := r.updateStatus(provisioner); err != nil { - r.logger.Info("failed to update Provisioner status", zap.Error(err)) - return reconcile.Result{}, err - } + err = r.reconcile(newProvisioner) + if updateStatusErr := util.UpdateClusterChannelProvisionerStatus(ctx, r.client, newProvisioner); updateStatusErr != nil { + r.logger.Info("error updating ClusterChannelProvisioner Status", zap.Error(updateStatusErr)) + return reconcile.Result{}, updateStatusErr } // Requeue if the resource is not ready: @@ -97,22 +92,3 @@ func (r *reconciler) reconcile(provisioner *v1alpha1.ClusterChannelProvisioner) return nil } - -func (r *reconciler) updateStatus(provisioner *v1alpha1.ClusterChannelProvisioner) (*v1alpha1.ClusterChannelProvisioner, error) { - newProvisioner := &v1alpha1.ClusterChannelProvisioner{} - err := r.client.Get(context.TODO(), client.ObjectKey{Namespace: provisioner.Namespace, Name: provisioner.Name}, newProvisioner) - - if err != nil { - return nil, err - } - newProvisioner.Status = provisioner.Status - - // Until #38113 is merged, we must use Update instead of UpdateStatus to - // update the Status block of the Provisioner resource. UpdateStatus will not - // allow changes to the Spec of the resource, which is ideal for ensuring - // nothing other than resource status has been updated. - if err = r.client.Update(context.TODO(), newProvisioner); err != nil { - return nil, err - } - return newProvisioner, nil -} diff --git a/pkg/provisioners/kafka/main.go b/pkg/provisioners/kafka/main.go index 6da4e1932fa..65b97003b68 100644 --- a/pkg/provisioners/kafka/main.go +++ b/pkg/provisioners/kafka/main.go @@ -60,6 +60,7 @@ func main() { channel.ProvideController, } + // TODO the underlying config map needs to be watched and the config should be reloaded if there is a change. 
provisionerConfig, err := getProvisionerConfig() if err != nil { From 0190820da403237780782bcf4c520c55cc262cff Mon Sep 17 00:00:00 2001 From: Matthias Wessendorf Date: Thu, 8 Nov 2018 15:49:30 +0100 Subject: [PATCH 20/20] Updating docs, based on feedback --- config/provisioners/kafka/README.md | 52 ++++++++++++++++--- config/provisioners/kafka/broker/README.md | 2 +- config/provisioners/kafka/strimzi/README.md | 31 ----------- .../kafka/strimzi/kafka-ephemeral.yaml | 23 -------- .../kafka/strimzi/kafka-persistent.yaml | 27 ---------- 5 files changed, 47 insertions(+), 88 deletions(-) delete mode 100644 config/provisioners/kafka/strimzi/README.md delete mode 100644 config/provisioners/kafka/strimzi/kafka-ephemeral.yaml delete mode 100644 config/provisioners/kafka/strimzi/kafka-persistent.yaml diff --git a/config/provisioners/kafka/README.md b/config/provisioners/kafka/README.md index cf9bb522541..b8c18cf31b7 100644 --- a/config/provisioners/kafka/README.md +++ b/config/provisioners/kafka/README.md @@ -2,15 +2,35 @@ Deployment steps: 1. Setup [Knative Eventing](../../../DEVELOPMENT.md) -1. Install an Apache Kafka cluster. There are two choices: +1. If not done already, install an Apache Kafka cluster. There are two choices: * Simple installation of [Apache Kafka](broker). * A production grade installation using the [Strimzi Kafka Operator](strimzi). + Installation [guides](http://strimzi.io/quickstarts/) are provided for + kubernetes and Openshift. -1. Now that the Apache Kafka is installed, apply the 'Kafka' ClusterChannelProvisioner: +1. Now that Apache Kafka is installed, you need to configure the +`bootstrap_servers` value in the `kafka-channel-controller-config` ConfigMap, +located inside the `config/provisioners/kafka/kafka-provisioner.yaml` file: + ``` + ... 
+ apiVersion: v1 + kind: ConfigMap + metadata: + name: kafka-channel-controller-config + namespace: knative-eventing + data: + # Broker URL's for the provisioner + bootstrap_servers: kafkabroker.kafka:9092 + ... + ``` + > Note: The `bootstrap_servers` needs to contain the address of at least + one broker of your Apache Kafka cluster. If you are using Strimzi, you need + to update the `bootstrap_servers` value to + `my-cluster-kafka-bootstrap.mynamespace:9092`. +1. Apply the 'Kafka' ClusterChannelProvisioner, Controller, and Dispatcher: ``` ko apply -f config/provisioners/kafka/kafka-provisioner.yaml ``` - > Note: If you are using Strimzi, you need to update the `KAFKA_BOOTSTRAP_SERVERS` value in the `kafka-channel-controller-config` ConfigMap to `my-cluster-kafka-bootstrap.kafka.9092`. 1. Create Channels that reference the 'kafka-channel'. ```yaml @@ -24,15 +44,35 @@ Deployment steps: kind: ClusterChannelProvisioner name: kafka-channel ``` -1. (Optional) Install [Kail](https://github.com/boz/kail) - Kubernetes tail ## Components The major components are: * ClusterChannelProvisioner Controller -* Channel Controller Config Map +* Channel Controller +* Channel Controller Config Map. +* Channel Dispatcher +* Channel Dispatcher Config Map. -The ClusterChannelProvisioner Controller and the Channel Controller are colocated in one Pod. 
+The ClusterChannelProvisioner Controller and the Channel Controller are colocated +in one Pod: ```shell kubectl get deployment -n knative-eventing kafka-channel-controller ``` + +The Channel Controller Config Map is used to configure the `bootstrap_servers` +of your Apache Kafka installation: +```shell +kubectl get configmap -n knative-eventing kafka-channel-controller-config +``` + +The Channel Dispatcher receives and distributes all events: +```shell +kubectl get statefulset -n knative-eventing kafka-channel-dispatcher +``` + +The Channel Dispatcher Config Map is used to send information about Channels and +Subscriptions from the Channel Controller to the Channel Dispatcher: +```shell +kubectl get configmap -n knative-eventing kafka-channel-dispatcher-config-map +``` diff --git a/config/provisioners/kafka/broker/README.md b/config/provisioners/kafka/broker/README.md index 0c75dde69b5..b47d0945734 100644 --- a/config/provisioners/kafka/broker/README.md +++ b/config/provisioners/kafka/broker/README.md @@ -1,6 +1,6 @@ # Apache Kafka - simple installation -1. For an installation of a simple Apache Kafka cluster, a setup is provided: +1. For an installation of a simple (**non production**) Apache Kafka cluster, a setup is provided: ``` kubectl create namespace kafka kubectl apply -n kafka -f kafka-broker.yaml diff --git a/config/provisioners/kafka/strimzi/README.md b/config/provisioners/kafka/strimzi/README.md deleted file mode 100644 index efd7d5437df..00000000000 --- a/config/provisioners/kafka/strimzi/README.md +++ /dev/null @@ -1,31 +0,0 @@ -# Strimzi - Apache Kafka Operator - -[Strimzi](http://strimzi.io) makes it easy to run a production grade Apache Kafka installation on OpenShift or Kubernetes. It implements the _Kubernetes Operator pattern_ for mananging `clusters`, `topics` or `users` based on custom resource files. - -Installing the Strimzi Cluster Operator is simple and requires only a few steps. - -1.
Create the `kafka` namespace in your Kubernetes cluster: - ``` - kubectl create namespace kafka - ``` - -1. Install the Strimzi _Cluster Operator_: - - * Applying yaml files from the [Strimzi release bundle](https://github.com/strimzi/strimzi-kafka-operator/releases/latest) - * Using the Strimzi Helm Chart - - Both ways for installing the _Cluster Operator_ are described in the [Strimzi documentation](http://strimzi.io/docs/master/#cluster-operator-str) itself - - > Note: Once this is done, you will have a `strimzi-cluster-operator` pod, which is able to install the Apache Kafka broker based on a `Kafka` custom resource file. - -1. Install the Apache Kafka cluster by providing the `kafka-persistent.yaml` Strimzi resource file from _this_ folder: - ``` - kubectl apply -f kafka-persistent.yaml -n kafka - ``` - > Note: If you want to use ephemeral storage, you have to use the `kafka-ephemeral.yaml` file. - - This provisions the complete installation of your Apache Kafka cluster. - -> Note: For learning more about Strimiz, please consult its [website](http://strimzi.io). - -Continue the configuration of Knative Eventing with [step `3`](../). 
diff --git a/config/provisioners/kafka/strimzi/kafka-ephemeral.yaml b/config/provisioners/kafka/strimzi/kafka-ephemeral.yaml deleted file mode 100644 index 6423bd39de9..00000000000 --- a/config/provisioners/kafka/strimzi/kafka-ephemeral.yaml +++ /dev/null @@ -1,23 +0,0 @@ -apiVersion: kafka.strimzi.io/v1alpha1 -kind: Kafka -metadata: - name: my-cluster -spec: - kafka: - replicas: 1 - listeners: - plain: {} - tls: {} - config: - offsets.topic.replication.factor: 3 - transaction.state.log.replication.factor: 3 - transaction.state.log.min.isr: 2 - storage: - type: ephemeral - zookeeper: - replicas: 1 - storage: - type: ephemeral - entityOperator: - topicOperator: {} - userOperator: {} diff --git a/config/provisioners/kafka/strimzi/kafka-persistent.yaml b/config/provisioners/kafka/strimzi/kafka-persistent.yaml deleted file mode 100644 index ea5fd60ce4d..00000000000 --- a/config/provisioners/kafka/strimzi/kafka-persistent.yaml +++ /dev/null @@ -1,27 +0,0 @@ -apiVersion: kafka.strimzi.io/v1alpha1 -kind: Kafka -metadata: - name: my-cluster -spec: - kafka: - replicas: 1 - listeners: - plain: {} - tls: {} - config: - offsets.topic.replication.factor: 3 - transaction.state.log.replication.factor: 3 - transaction.state.log.min.isr: 2 - storage: - type: persistent-claim - size: 1Gi - deleteClaim: false - zookeeper: - replicas: 1 - storage: - type: persistent-claim - size: 1Gi - deleteClaim: false - entityOperator: - topicOperator: {} - userOperator: {}