diff --git a/cmd/controller/main.go b/cmd/controller/main.go
index c3bd0d887771..6eb8e97fd6ef 100644
--- a/cmd/controller/main.go
+++ b/cmd/controller/main.go
@@ -27,17 +27,20 @@ import (
 	"knative.dev/serving/pkg/reconciler/service"
 
 	// This defines the shared main for injected controllers.
+	"knative.dev/pkg/injection"
 	"knative.dev/pkg/injection/sharedmain"
 )
 
+var ctors = []injection.ControllerConstructor{
+	configuration.NewController,
+	labeler.NewController,
+	revision.NewController,
+	route.NewController,
+	serverlessservice.NewController,
+	service.NewController,
+	gc.NewController,
+}
+
 func main() {
-	sharedmain.Main("controller",
-		configuration.NewController,
-		labeler.NewController,
-		revision.NewController,
-		route.NewController,
-		serverlessservice.NewController,
-		service.NewController,
-		gc.NewController,
-	)
+	sharedmain.Main("controller", ctors...)
 }
diff --git a/cmd/controller/main_test.go b/cmd/controller/main_test.go
new file mode 100644
index 000000000000..84e691f182e3
--- /dev/null
+++ b/cmd/controller/main_test.go
@@ -0,0 +1,29 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+	"testing"
+
+	"knative.dev/serving/test/ha"
+)
+
+func TestNumController(t *testing.T) {
+	if got, want := len(ctors), ha.NumControllerReconcilers; got != want {
+		t.Errorf("Unexpected number of controllers = %d, wanted %d. This likely means the constant should be updated.", got, want)
+	}
+}
diff --git a/cmd/webhook/main.go b/cmd/webhook/main.go
index df6ba9c28244..05361ca33706 100644
--- a/cmd/webhook/main.go
+++ b/cmd/webhook/main.go
@@ -23,7 +23,7 @@ import (
 	"knative.dev/pkg/configmap"
 	"knative.dev/pkg/controller"
 	"knative.dev/pkg/injection/sharedmain"
-	pkgleaderelection "knative.dev/pkg/leaderelection"
+	"knative.dev/pkg/leaderelection"
 	"knative.dev/pkg/logging"
 	"knative.dev/pkg/metrics"
 	"knative.dev/pkg/signals"
@@ -42,7 +42,6 @@ import (
 	servingv1 "knative.dev/serving/pkg/apis/serving/v1"
 	servingv1alpha1 "knative.dev/serving/pkg/apis/serving/v1alpha1"
 	servingv1beta1 "knative.dev/serving/pkg/apis/serving/v1beta1"
-	"knative.dev/serving/pkg/leaderelection"
 	extravalidation "knative.dev/serving/pkg/webhook"
 
 	// config validation constructors
@@ -152,16 +151,16 @@ func newConfigValidationController(ctx context.Context, cmw configmap.Watcher) *
 		// The configmaps to validate.
 		configmap.Constructors{
-			tracingconfig.ConfigName:          tracingconfig.NewTracingConfigFromConfigMap,
-			autoscalerconfig.ConfigName:       autoscalerconfig.NewConfigFromConfigMap,
-			gc.ConfigName:                     gc.NewConfigFromConfigMapFunc(ctx),
-			network.ConfigName:                network.NewConfigFromConfigMap,
-			deployment.ConfigName:             deployment.NewConfigFromConfigMap,
-			metrics.ConfigMapName():           metrics.NewObservabilityConfigFromConfigMap,
-			logging.ConfigMapName():           logging.NewConfigFromConfigMap,
-			pkgleaderelection.ConfigMapName(): leaderelection.ValidateConfig,
-			domainconfig.DomainConfigName:     domainconfig.NewDomainFromConfigMap,
-			defaultconfig.DefaultsConfigName:  defaultconfig.NewDefaultsConfigFromConfigMap,
+			tracingconfig.ConfigName:         tracingconfig.NewTracingConfigFromConfigMap,
+			autoscalerconfig.ConfigName:      autoscalerconfig.NewConfigFromConfigMap,
+			gc.ConfigName:                    gc.NewConfigFromConfigMapFunc(ctx),
+			network.ConfigName:               network.NewConfigFromConfigMap,
+			deployment.ConfigName:            deployment.NewConfigFromConfigMap,
+			metrics.ConfigMapName():          metrics.NewObservabilityConfigFromConfigMap,
+			logging.ConfigMapName():          logging.NewConfigFromConfigMap,
+			leaderelection.ConfigMapName():   leaderelection.NewConfigFromConfigMap,
+			domainconfig.DomainConfigName:    domainconfig.NewDomainFromConfigMap,
+			defaultconfig.DefaultsConfigName: defaultconfig.NewDefaultsConfigFromConfigMap,
 		},
 	)
 }
diff --git a/config/core/configmaps/leader-election.yaml b/config/core/configmaps/leader-election.yaml
index e2ac08de71c4..ec90184ec72a 100644
--- a/config/core/configmaps/leader-election.yaml
+++ b/config/core/configmaps/leader-election.yaml
@@ -20,7 +20,7 @@ metadata:
   labels:
     serving.knative.dev/release: devel
   annotations:
-    knative.dev/example-checksum: "b705abde"
+    knative.dev/example-checksum: "a255a6cc"
 data:
   _example: |
     ################################
@@ -49,7 +49,3 @@ data:
     # retryPeriod is how long the leader election client waits between tries of
     # actions; 2 seconds is the value used by core kubernetes controllers.
     retryPeriod: "2s"
-
-    # enabledComponents is a comma-delimited list of component names for which
-    # leader election is enabled. Valid values are:
-    enabledComponents: "controller,contour-ingress-controller,hpaautoscaler,certcontroller,istiocontroller,net-http01,nscontroller,webhook"
diff --git a/go.mod b/go.mod
index 84539a88753a..140e07a25e50 100644
--- a/go.mod
+++ b/go.mod
@@ -38,10 +38,10 @@ require (
 	k8s.io/client-go v11.0.1-0.20190805182717-6502b5e7b1b5+incompatible
 	k8s.io/code-generator v0.18.0
 	k8s.io/kube-openapi v0.0.0-20200410145947-bcb3869e6f29
-	knative.dev/caching v0.0.0-20200707200344-95a2aaeace0f
-	knative.dev/networking v0.0.0-20200707203944-725ec013d8a2
-	knative.dev/pkg v0.0.0-20200713031612-b09a159e12c9
-	knative.dev/test-infra v0.0.0-20200713045417-850e4e37918d
+	knative.dev/caching v0.0.0-20200713162518-90ce4328c69e
+	knative.dev/networking v0.0.0-20200713162319-e2731eead7e8
+	knative.dev/pkg v0.0.0-20200713194318-a81727701f66
+	knative.dev/test-infra v0.0.0-20200713185018-6b52776d44a4
 )
 
 replace (
diff --git a/go.sum b/go.sum
index 1656e86c46f8..7c674cf10053 100644
--- a/go.sum
+++ b/go.sum
@@ -38,7 +38,6 @@ cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+
 cloud.google.com/go/pubsub v1.2.0 h1:Lpy6hKgdcl7a3WGSfJIFmxmcdjSpP6OmBEfcOv1Y680=
 cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
 cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
-cloud.google.com/go/pubsub v1.4.0/go.mod h1:LFrqilwgdw4X2cJS9ALgzYmMu+ULyrUN6IHV3CPK4TM=
 cloud.google.com/go/pubsub v1.5.0/go.mod h1:ZEwJccE3z93Z2HWvstpri00jOg7oO4UZDtKhwDwqF0w=
 cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
 cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
@@ -537,7 +536,6 @@ github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/
 github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
 github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/licenseclassifier v0.0.0-20190926221455-842c0d70d702/go.mod h1:qsqn2hxC+vURpyBRygGUuinTO42MFRLcsmQ/P8v94+M=
-github.com/google/licenseclassifier v0.0.0-20200402202327-879cb1424de0/go.mod h1:qsqn2hxC+vURpyBRygGUuinTO42MFRLcsmQ/P8v94+M=
 github.com/google/licenseclassifier v0.0.0-20200708223521-3d09a0ea2f39/go.mod h1:qsqn2hxC+vURpyBRygGUuinTO42MFRLcsmQ/P8v94+M=
 github.com/google/mako v0.0.0-20190821191249-122f8dcef9e3 h1:/o5e44nTD/QEEiWPGSFT3bSqcq3Qg7q27N9bv4gKh5M=
 github.com/google/mako v0.0.0-20190821191249-122f8dcef9e3/go.mod h1:YzLcVlL+NqWnmUEPuhS1LxDDwGO9WNbVlEXaF4IH35g=
@@ -1379,8 +1377,6 @@ golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roY
 golang.org/x/tools v0.0.0-20200527183253-8e7acdbce89d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
 golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
 golang.org/x/tools v0.0.0-20200626171337-aa94e735be7f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200701000337-a32c0cb1d5b2 h1:xs+dSrelqXhHGIwIftyT5DHxJKH8hbDQnHc5KZ6i/u8=
-golang.org/x/tools v0.0.0-20200701000337-a32c0cb1d5b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
 golang.org/x/tools v0.0.0-20200706234117-b22de6825cf7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
 golang.org/x/tools v0.0.0-20200710042808-f1c4188a97a1 h1:rD1FcWVsRaMY+l8biE9jbWP5MS/CJJ/90a9TMkMgNrM=
 golang.org/x/tools v0.0.0-20200710042808-f1c4188a97a1/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
@@ -1467,11 +1463,8 @@ google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfG
 google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
 google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
 google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
-google.golang.org/genproto v0.0.0-20200528110217-3d3490e7e671/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
 google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
 google.golang.org/genproto v0.0.0-20200626011028-ee7919e894b5/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200701001935-0939c5918c31 h1:Of4QP8bfRqzDROen6+s2j/p0jCPgzvQRd9nHiactfn4=
-google.golang.org/genproto v0.0.0-20200701001935-0939c5918c31/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20200707001353-8e8330bf89df/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20200710124503-20a17af7bd0e h1:k+p/u26/lVeNEpdxSeUrm7rTvoFckBKaf7gTzgmHyDA=
 google.golang.org/genproto v0.0.0-20200710124503-20a17af7bd0e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
@@ -1621,26 +1614,25 @@ k8s.io/legacy-cloud-providers v0.17.4/go.mod h1:FikRNoD64ECjkxO36gkDgJeiQWwyZTuB
 k8s.io/metrics v0.17.2/go.mod h1:3TkNHET4ROd+NfzNxkjoVfQ0Ob4iZnaHmSEA4vYpwLw=
 k8s.io/test-infra v0.0.0-20200514184223-ba32c8aae783/go.mod h1:bW6thaPZfL2hW7ecjx2WYwlP9KQLM47/xIJyttkVk5s=
 k8s.io/test-infra v0.0.0-20200617221206-ea73eaeab7ff/go.mod h1:L3+cRvwftUq8IW1TrHji5m3msnc4uck/7LsE/GR/aZk=
-k8s.io/test-infra v0.0.0-20200630233406-1dca6122872e/go.mod h1:L3+cRvwftUq8IW1TrHji5m3msnc4uck/7LsE/GR/aZk=
 k8s.io/test-infra v0.0.0-20200710134549-5891a1a4cc17/go.mod h1:L3+cRvwftUq8IW1TrHji5m3msnc4uck/7LsE/GR/aZk=
 k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
 k8s.io/utils v0.0.0-20200124190032-861946025e34 h1:HjlUD6M0K3P8nRXmr2B9o4F9dUy9TCj/aEpReeyi6+k=
 k8s.io/utils v0.0.0-20200124190032-861946025e34/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
 knative.dev/caching v0.0.0-20200116200605-67bca2c83dfa/go.mod h1:dHXFU6CGlLlbzaWc32g80cR92iuBSpsslDNBWI8C7eg=
-knative.dev/caching v0.0.0-20200707200344-95a2aaeace0f h1:CwsFW9IKreayHjwzwbnlEtJaiwVYCC3D34AcN4yb6m0=
-knative.dev/caching v0.0.0-20200707200344-95a2aaeace0f/go.mod h1:ZQa3DyEIY48qsx5U1ehllwgPHV8rFGzrBB/WonNUzLs=
+knative.dev/caching v0.0.0-20200713162518-90ce4328c69e h1:ABjk18hjZYryC5Rs7YNT5PalPwXzfA3COKId0gWXuio=
+knative.dev/caching v0.0.0-20200713162518-90ce4328c69e/go.mod h1:7I1DXX8uZX74qggUoUse5ZCaTuMIKyTVBZBpr/cmlaQ=
 knative.dev/eventing-contrib v0.11.2/go.mod h1:SnXZgSGgMSMLNFTwTnpaOH7hXDzTFtw0J8OmHflNx3g=
-knative.dev/networking v0.0.0-20200707203944-725ec013d8a2 h1:Co9j0Q4ZJxwkzVFKUc6AsIXrdiASbaKdHBUROnfiej4=
-knative.dev/networking v0.0.0-20200707203944-725ec013d8a2/go.mod h1:e1NL29AarTcgaR240oc4GUzqHtTfTu62JNrUHN3kIG0=
+knative.dev/networking v0.0.0-20200713162319-e2731eead7e8 h1:BYQ/DJ1CQ0TQHzrrgdlg3zmL3djM/c4N4utLSEh9Fr8=
+knative.dev/networking v0.0.0-20200713162319-e2731eead7e8/go.mod h1:9LCtmPUoygQ+M1ujGZeYcytAF3bDR42rlINsBhge06o=
 knative.dev/pkg v0.0.0-20200207155214-fef852970f43/go.mod h1:pgODObA1dTyhNoFxPZTTjNWfx6F0aKsKzn+vaT9XO/Q=
-knative.dev/pkg v0.0.0-20200707190344-0a8314b44495/go.mod h1:AqAJV6rYi8IGikDjJ/9ZQd9qKdkXVlesVnVjwx62YB8=
-knative.dev/pkg v0.0.0-20200713031612-b09a159e12c9 h1:YSapebbZZbpH31YMSF0Egt7+IDi/og4S574eWqEXReo=
 knative.dev/pkg v0.0.0-20200713031612-b09a159e12c9/go.mod h1:aWPsPIHISvZetAm/2pnz+v6Ro5EYaX704Z/Zd9rTZ4M=
-knative.dev/test-infra v0.0.0-20200707183444-aed09e56ddc7/go.mod h1:RjYAhXnZqeHw9+B0zsbqSPlae0lCvjekO/nw5ZMpLCs=
-knative.dev/test-infra v0.0.0-20200710160019-5b9732bc24f7 h1:fAl3pG2I323tie8kuuNlB88B7RB8WJtCrsXIKuNh1U8=
+knative.dev/pkg v0.0.0-20200713194318-a81727701f66 h1:H9s47uSb5NCRvnsyIQQpWo5q/cRJ5qEDpm/5pwdnPEg=
+knative.dev/pkg v0.0.0-20200713194318-a81727701f66/go.mod h1:2xVLIH5SNUripobZvOEz3w/Ta9xqMkw7QmFIa2cbDFY=
 knative.dev/test-infra v0.0.0-20200710160019-5b9732bc24f7/go.mod h1:vtT6dLs/iNj8pKcfag8CSVqHKNMgyCFtU/g1pV7Bovs=
 knative.dev/test-infra v0.0.0-20200713045417-850e4e37918d h1:Q3LrAYSi+Ii2yZVUiA5Y3Jr4TCU6g/XN9ClVosejpJk=
 knative.dev/test-infra v0.0.0-20200713045417-850e4e37918d/go.mod h1:vtT6dLs/iNj8pKcfag8CSVqHKNMgyCFtU/g1pV7Bovs=
+knative.dev/test-infra v0.0.0-20200713185018-6b52776d44a4 h1:BYNKY0hC5wsq533k6XbJXi+sb9LNNhM8NQV4mGljR2c=
+knative.dev/test-infra v0.0.0-20200713185018-6b52776d44a4/go.mod h1:vtT6dLs/iNj8pKcfag8CSVqHKNMgyCFtU/g1pV7Bovs=
 modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw=
 modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk=
 modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k=
diff --git a/pkg/leaderelection/config.go b/pkg/leaderelection/config.go
index 74244d1f618b..6e9a729bbd8a 100644
--- a/pkg/leaderelection/config.go
+++ b/pkg/leaderelection/config.go
@@ -17,39 +17,7 @@ limitations under the License.
 package leaderelection
 
 import (
-	"fmt"
-
-	corev1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/util/sets"
 	kle "knative.dev/pkg/leaderelection"
 )
 
-var (
-	validComponents = sets.NewString(
-		"controller",
-		"contour-ingress-controller",
-		"hpaautoscaler",
-		"certcontroller",
-		"istiocontroller",
-		"net-http01",
-		"nscontroller",
-		"webhook",
-	)
-)
-
-// ValidateConfig enriches the leader election config validation
-// with extra validations specific to serving.
-func ValidateConfig(configMap *corev1.ConfigMap) (*kle.Config, error) {
-	config, err := kle.NewConfigFromMap(configMap.Data)
-	if err != nil {
-		return nil, err
-	}
-
-	for component := range config.EnabledComponents {
-		if !validComponents.Has(component) {
-			return nil, fmt.Errorf("invalid enabledComponent %q: valid values are %q", component, validComponents.List())
-		}
-	}
-
-	return config, nil
-}
+var ValidateConfig = kle.NewConfigFromConfigMap
diff --git a/pkg/leaderelection/config_test.go b/pkg/leaderelection/config_test.go
index e3f5a3163b40..01055de3493d 100644
--- a/pkg/leaderelection/config_test.go
+++ b/pkg/leaderelection/config_test.go
@@ -24,7 +24,6 @@ import (
 	"github.com/google/go-cmp/cmp"
 	corev1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/util/sets"
 
 	. "knative.dev/pkg/configmap/testing"
 	kle "knative.dev/pkg/leaderelection"
 )
@@ -32,12 +31,11 @@ import (
 
 func okConfig() *kle.Config {
 	return &kle.Config{
-		ResourceLock:      "leases",
-		Buckets:           1,
-		LeaseDuration:     15 * time.Second,
-		RenewDeadline:     10 * time.Second,
-		RetryPeriod:       2 * time.Second,
-		EnabledComponents: sets.NewString("controller"),
+		ResourceLock:  "leases",
+		Buckets:       1,
+		LeaseDuration: 15 * time.Second,
+		RenewDeadline: 10 * time.Second,
+		RetryPeriod:   2 * time.Second,
 	}
 }
@@ -46,10 +44,9 @@ func okData() map[string]string {
 		// values in this data come from the defaults suggested in the
 		// code:
 		// https://github.com/kubernetes/client-go/blob/kubernetes-1.16.0/tools/leaderelection/leaderelection.go
-		"leaseDuration":     "15s",
-		"renewDeadline":     "10s",
-		"retryPeriod":       "2s",
-		"enabledComponents": "controller",
+		"leaseDuration": "15s",
+		"renewDeadline": "10s",
+		"retryPeriod":   "2s",
 	}
 }
@@ -71,14 +68,6 @@ func TestValidateConfig(t *testing.T) {
 			return data
 		}(),
 		err: errors.New(`failed to parse "renewDeadline": time: invalid duration not a duration`),
-	}, {
-		name: "invalid component",
-		data: func() map[string]string {
-			data := okData()
-			data["enabledComponents"] = "controller,frobulator"
-			return data
-		}(),
-		err: errors.New(`invalid enabledComponent "frobulator": valid values are ["certcontroller" "contour-ingress-controller" "controller" "hpaautoscaler" "istiocontroller" "net-http01" "nscontroller" "webhook"]`),
 	}}
 
 	for _, tc := range cases {
@@ -114,12 +103,11 @@ func TestServingConfig(t *testing.T) {
 	}, {
 		name: "Example config",
 		want: &kle.Config{
-			ResourceLock:      "leases",
-			Buckets:           1,
-			LeaseDuration:     15 * time.Second,
-			RenewDeadline:     10 * time.Second,
-			RetryPeriod:       2 * time.Second,
-			EnabledComponents: validComponents,
+			ResourceLock:  "leases",
+			Buckets:       1,
+			LeaseDuration: 15 * time.Second,
+			RenewDeadline: 10 * time.Second,
+			RetryPeriod:   2 * time.Second,
 		},
 		data: example,
 	}} {
diff --git a/test/e2e-tests.sh b/test/e2e-tests.sh
index cd8a336934c2..6fa2b9c1c408 100755
--- a/test/e2e-tests.sh
+++ b/test/e2e-tests.sh
@@ -102,20 +102,27 @@ if (( HTTPS )); then
   add_trap "turn_off_auto_tls" SIGKILL SIGTERM SIGQUIT
 fi
 
+
+# Keep this in sync with test/ha/ha.go
+readonly REPLICAS=2
+readonly BUCKETS=10
+
+
 # Enable allow-zero-initial-scale before running e2e tests (for test/e2e/initial_scale_test.go)
 kubectl -n ${SYSTEM_NAMESPACE} patch configmap/config-autoscaler --type=merge --patch='{"data":{"allow-zero-initial-scale":"true"}}' || failed=1
 add_trap "kubectl -n ${SYSTEM_NAMESPACE} patch configmap/config-autoscaler --type=merge --patch='{\"data\":{\"allow-zero-initial-scale\":\"false\"}}'" SIGKILL SIGTERM SIGQUIT
 
+# Keep the bucket count in sync with test/ha/ha.go
 kubectl -n "${SYSTEM_NAMESPACE}" patch configmap/config-leader-election --type=merge \
-  --patch='{"data":{"enabledComponents":"controller,hpaautoscaler,webhook", "buckets": "10"}}' || failed=1
-add_trap "kubectl get cm config-leader-election -n ${SYSTEM_NAMESPACE} -oyaml | sed '/.*enabledComponents.*/d' | kubectl replace -f -" SIGKILL SIGTERM SIGQUIT
+  --patch='{"data":{"buckets": "'${BUCKETS}'"}}' || failed=1
+add_trap "kubectl get cm config-leader-election -n ${SYSTEM_NAMESPACE} -oyaml | sed '/.*buckets.*/d' | kubectl replace -f -" SIGKILL SIGTERM SIGQUIT
 
 # Save activator HPA original values for later use.
 hpa_spec=$(echo '{"spec": {'$(kubectl get hpa activator -n "knative-serving" -ojsonpath='"minReplicas": {.spec.minReplicas}, "maxReplicas": {.spec.maxReplicas}')'}}')
 kubectl patch hpa activator -n "${SYSTEM_NAMESPACE}" \
   --type "merge" \
-  --patch '{"spec": {"minReplicas": 2, "maxReplicas": 2}}' || failed=1
+  --patch '{"spec": {"minReplicas": '${REPLICAS}', "maxReplicas": '${REPLICAS}'}}' || failed=1
 add_trap "kubectl patch hpa activator -n ${SYSTEM_NAMESPACE} \
   --type 'merge' \
   --patch $hpa_spec" SIGKILL SIGTERM SIGQUIT
@@ -126,7 +133,7 @@ for deployment in controller autoscaler-hpa webhook; do
   # Give it time to kill the pods.
   sleep 5
   # Scale up components for HA tests
-  kubectl -n "${SYSTEM_NAMESPACE}" scale deployment "$deployment" --replicas=2 || failed=1
+  kubectl -n "${SYSTEM_NAMESPACE}" scale deployment "$deployment" --replicas="${REPLICAS}" || failed=1
 done
 add_trap "for deployment in controller autoscaler-hpa webhook; do \
   kubectl -n ${SYSTEM_NAMESPACE} scale deployment $deployment --replicas=0; \
@@ -200,7 +207,8 @@ fi
 
 # Run HA tests separately as they're stopping core Knative Serving pods
 # Define short -spoofinterval to ensure frequent probing while stopping pods
-go_test_e2e -timeout=15m -failfast -parallel=1 ./test/ha -spoofinterval="10ms" || failed=1
+go_test_e2e -timeout=15m -failfast -parallel=1 ./test/ha \
+  -replicas="${REPLICAS:-1}" -buckets="${BUCKETS:-1}" -spoofinterval="10ms" || failed=1
 
 (( failed )) && fail_test
diff --git a/test/e2e_flags.go b/test/e2e_flags.go
index 153e3e328123..0fb1feaa8f1a 100644
--- a/test/e2e_flags.go
+++ b/test/e2e_flags.go
@@ -35,6 +35,8 @@ type ServingEnvironmentFlags struct {
 	IngressClass     string // Indicates the class of Ingress provider to test.
 	CertificateClass string // Indicates the class of Certificate provider to test.
 	SystemNamespace  string // Indicates the system namespace, in which Knative Serving is installed.
+	Buckets          int    // The number of reconciler buckets configured.
+	Replicas         int    // The number of controlplane replicas being run.
 }
 
 func initializeServingFlags() *ServingEnvironmentFlags {
@@ -51,5 +53,11 @@ func initializeServingFlags() *ServingEnvironmentFlags {
 	flag.StringVar(&f.CertificateClass, "certificateClass", network.CertManagerCertificateClassName,
 		"Set this flag to the certificate class to test against.")
 
+	flag.IntVar(&f.Buckets, "buckets", 1,
+		"Set this flag to the number of reconciler buckets configured.")
+
+	flag.IntVar(&f.Replicas, "replicas", 1,
+		"Set this flag to the number of controlplane replicas being run.")
+
 	return &f
 }
diff --git a/test/ha/activator_test.go b/test/ha/activator_test.go
index f18e0c57e7f4..90c9b477fbe5 100644
--- a/test/ha/activator_test.go
+++ b/test/ha/activator_test.go
@@ -64,8 +64,8 @@ func testActivatorHA(t *testing.T, gracePeriod *int64, slo float64) {
 	podDeleteOptions := &metav1.DeleteOptions{GracePeriodSeconds: gracePeriod}
 
-	if err := pkgTest.WaitForDeploymentScale(clients.KubeClient, activatorDeploymentName, system.Namespace(), haReplicas); err != nil {
-		t.Fatalf("Deployment %s not scaled to %d: %v", activatorDeploymentName, haReplicas, err)
+	if err := pkgTest.WaitForDeploymentScale(clients.KubeClient, activatorDeploymentName, system.Namespace(), test.ServingFlags.Replicas); err != nil {
+		t.Fatalf("Deployment %s not scaled to %d: %v", activatorDeploymentName, test.ServingFlags.Replicas, err)
 	}
 	activators, err := clients.KubeClient.Kube.CoreV1().Pods(system.Namespace()).List(metav1.ListOptions{
 		LabelSelector: activatorLabel,
@@ -125,7 +125,7 @@ func testActivatorHA(t *testing.T, gracePeriod *int64, slo float64) {
 	if err := pkgTest.WaitForPodDeleted(clients.KubeClient, activator.Name, system.Namespace()); err != nil {
 		t.Fatalf("Did not observe %s to actually be deleted: %v", activator.Name, err)
 	}
-	if err := pkgTest.WaitForServiceEndpoints(clients.KubeClient, resourcesScaleToZero.Revision.Name, test.ServingNamespace, haReplicas); err != nil {
+	if err := pkgTest.WaitForServiceEndpoints(clients.KubeClient, resourcesScaleToZero.Revision.Name, test.ServingNamespace, test.ServingFlags.Replicas); err != nil {
 		t.Fatalf("Deployment %s failed to scale up: %v", activatorDeploymentName, err)
 	}
 	if gracePeriod != nil && *gracePeriod == 0 {
diff --git a/test/ha/autoscalerhpa_test.go b/test/ha/autoscalerhpa_test.go
index 2206c281886d..346093ec1b0c 100644
--- a/test/ha/autoscalerhpa_test.go
+++ b/test/ha/autoscalerhpa_test.go
@@ -42,12 +42,12 @@ func TestAutoscalerHPAHANewRevision(t *testing.T) {
 	cancel := logstream.Start(t)
 	defer cancel()
 
-	if err := pkgTest.WaitForDeploymentScale(clients.KubeClient, autoscalerHPADeploymentName, system.Namespace(), haReplicas); err != nil {
-		t.Fatalf("Deployment %s not scaled to %d: %v", autoscalerHPADeploymentName, haReplicas, err)
+	if err := pkgTest.WaitForDeploymentScale(clients.KubeClient, autoscalerHPADeploymentName, system.Namespace(), test.ServingFlags.Replicas); err != nil {
+		t.Fatalf("Deployment %s not scaled to %d: %v", autoscalerHPADeploymentName, test.ServingFlags.Replicas, err)
 	}
 
 	// TODO(mattmoor): Once we switch to the new sharded leader election, we should use more than a single bucket here, but the test is still interesting.
-	leaders, err := pkgHa.WaitForNewLeaders(t, clients.KubeClient, autoscalerHPADeploymentName, system.Namespace(), sets.NewString(), 1 /* numBuckets */)
+	leaders, err := pkgHa.WaitForNewLeaders(t, clients.KubeClient, autoscalerHPADeploymentName, system.Namespace(), sets.NewString(), test.ServingFlags.Buckets)
 	if err != nil {
 		t.Fatal("Failed to get leader:", err)
 	}
@@ -74,7 +74,7 @@ func TestAutoscalerHPAHANewRevision(t *testing.T) {
 	}
 
 	// Wait for all of the old leaders to go away, and then for the right number to be back.
-	if _, err := pkgHa.WaitForNewLeaders(t, clients.KubeClient, autoscalerHPADeploymentName, system.Namespace(), leaders, 1 /* numBuckets */); err != nil {
+	if _, err := pkgHa.WaitForNewLeaders(t, clients.KubeClient, autoscalerHPADeploymentName, system.Namespace(), leaders, test.ServingFlags.Buckets); err != nil {
 		t.Fatal("Failed to find new leader:", err)
 	}
diff --git a/test/ha/controller_test.go b/test/ha/controller_test.go
index 73b662ea5b25..b54dab1ef6b7 100644
--- a/test/ha/controller_test.go
+++ b/test/ha/controller_test.go
@@ -32,19 +32,21 @@ import (
 	"knative.dev/serving/test/e2e"
 )
 
-const controllerDeploymentName = "controller"
+const (
+	controllerDeploymentName = "controller"
+)
 
 func TestControllerHA(t *testing.T) {
 	clients := e2e.Setup(t)
 	cancel := logstream.Start(t)
 	defer cancel()
 
-	if err := pkgTest.WaitForDeploymentScale(clients.KubeClient, controllerDeploymentName, system.Namespace(), haReplicas); err != nil {
-		t.Fatalf("Deployment %s not scaled to %d: %v", controllerDeploymentName, haReplicas, err)
+	if err := pkgTest.WaitForDeploymentScale(clients.KubeClient, controllerDeploymentName, system.Namespace(), test.ServingFlags.Replicas); err != nil {
+		t.Fatalf("Deployment %s not scaled to %d: %v", controllerDeploymentName, test.ServingFlags.Replicas, err)
 	}
 
 	// TODO(mattmoor): Once we switch to the new sharded leader election, we should use more than a single bucket here, but the test is still interesting.
-	leaders, err := pkgHa.WaitForNewLeaders(t, clients.KubeClient, controllerDeploymentName, system.Namespace(), sets.NewString(), 1 /* numBuckets */)
+	leaders, err := pkgHa.WaitForNewLeaders(t, clients.KubeClient, controllerDeploymentName, system.Namespace(), sets.NewString(), NumControllerReconcilers*test.ServingFlags.Buckets)
 	if err != nil {
 		t.Fatal("Failed to get leader:", err)
 	}
@@ -65,7 +67,7 @@ func TestControllerHA(t *testing.T) {
 	}
 
 	// Wait for all of the old leaders to go away, and then for the right number to be back.
-	if _, err := pkgHa.WaitForNewLeaders(t, clients.KubeClient, controllerDeploymentName, system.Namespace(), leaders, 1 /* numBuckets */); err != nil {
+	if _, err := pkgHa.WaitForNewLeaders(t, clients.KubeClient, controllerDeploymentName, system.Namespace(), leaders, NumControllerReconcilers*test.ServingFlags.Buckets); err != nil {
 		t.Fatal("Failed to find new leader:", err)
 	}
diff --git a/test/ha/ha.go b/test/ha/ha.go
index e5cea896dfe5..2d5fdde38a91 100644
--- a/test/ha/ha.go
+++ b/test/ha/ha.go
@@ -32,7 +32,7 @@ import (
 )
 
 const (
-	haReplicas = 2
+	NumControllerReconcilers = 7 // Keep in sync with ./cmd/controller/main.go
 )
 
 func createPizzaPlanetService(t *testing.T, fopt ...rtesting.ServiceOption) (test.ResourceNames, *v1test.ResourceObjects) {
diff --git a/vendor/knative.dev/pkg/apis/duck/v1_tests.go b/vendor/knative.dev/pkg/apis/duck/v1_tests.go
index ca68b4c89613..ba56a994cade 100644
--- a/vendor/knative.dev/pkg/apis/duck/v1_tests.go
+++ b/vendor/knative.dev/pkg/apis/duck/v1_tests.go
@@ -22,7 +22,7 @@ import (
 	appsv1 "k8s.io/api/apps/v1"
 	batchv1 "k8s.io/api/batch/v1"
 
-	"knative.dev/pkg/apis/duck/v1"
+	v1 "knative.dev/pkg/apis/duck/v1"
 )
 
 // Conditions is an Implementable "duck type".
diff --git a/vendor/knative.dev/pkg/apis/duck/v1beta1/addressable_types.go b/vendor/knative.dev/pkg/apis/duck/v1beta1/addressable_types.go
index 46cfed16018b..d8984d8ed245 100644
--- a/vendor/knative.dev/pkg/apis/duck/v1beta1/addressable_types.go
+++ b/vendor/knative.dev/pkg/apis/duck/v1beta1/addressable_types.go
@@ -25,7 +25,7 @@ import (
 	"knative.dev/pkg/apis"
 	"knative.dev/pkg/apis/duck/ducktypes"
-	"knative.dev/pkg/apis/duck/v1"
+	v1 "knative.dev/pkg/apis/duck/v1"
 )
 
 // +genduck
diff --git a/vendor/knative.dev/pkg/injection/sharedmain/main.go b/vendor/knative.dev/pkg/injection/sharedmain/main.go
index 6c4acf71ec4b..676e47de8240 100644
--- a/vendor/knative.dev/pkg/injection/sharedmain/main.go
+++ b/vendor/knative.dev/pkg/injection/sharedmain/main.go
@@ -33,14 +33,8 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/util/wait"
-	"k8s.io/apimachinery/pkg/watch"
-	"k8s.io/client-go/kubernetes/scheme"
-	typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
 	"k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/clientcmd"
-	"k8s.io/client-go/tools/leaderelection"
-	"k8s.io/client-go/tools/leaderelection/resourcelock"
-	"k8s.io/client-go/tools/record"
 
 	"go.uber.org/zap"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -48,7 +42,7 @@ import (
 	"knative.dev/pkg/configmap"
 	"knative.dev/pkg/controller"
 	"knative.dev/pkg/injection"
-	kle "knative.dev/pkg/leaderelection"
+	"knative.dev/pkg/leaderelection"
 	"knative.dev/pkg/logging"
 	"knative.dev/pkg/metrics"
 	"knative.dev/pkg/profiling"
@@ -108,110 +102,65 @@ func GetLoggingConfig(ctx context.Context) (*logging.Config, error) {
 }
 
 // GetLeaderElectionConfig gets the leader election config.
-func GetLeaderElectionConfig(ctx context.Context) (*kle.Config, error) {
-	leaderElectionConfigMap, err := kubeclient.Get(ctx).CoreV1().ConfigMaps(system.Namespace()).Get(kle.ConfigMapName(), metav1.GetOptions{})
+func GetLeaderElectionConfig(ctx context.Context) (*leaderelection.Config, error) {
+	leaderElectionConfigMap, err := kubeclient.Get(ctx).CoreV1().ConfigMaps(system.Namespace()).Get(leaderelection.ConfigMapName(), metav1.GetOptions{})
 	if apierrors.IsNotFound(err) {
-		return kle.NewConfigFromConfigMap(nil)
+		return leaderelection.NewConfigFromConfigMap(nil)
 	} else if err != nil {
 		return nil, err
 	}
 
-	return kle.NewConfigFromConfigMap(leaderElectionConfigMap)
+	return leaderelection.NewConfigFromConfigMap(leaderElectionConfigMap)
 }
 
-// Main runs the generic main flow for non-webhook controllers with a new
-// context. Use WebhookMainWith* if you need to serve webhooks.
+// Main runs the generic main flow with a new context.
+// If any of the constructed controllers are AdmissionControllers or Conversion webhooks,
+// then a webhook is started to serve them.
 func Main(component string, ctors ...injection.ControllerConstructor) {
 	// Set up signals so we handle the first shutdown signal gracefully.
 	MainWithContext(signals.NewContext(), component, ctors...)
 }
 
-// MainWithContext runs the generic main flow for non-webhook controllers. Use
-// WebhookMainWithContext if you need to serve webhooks.
-func MainWithContext(ctx context.Context, component string, ctors ...injection.ControllerConstructor) {
-	MainWithConfig(ctx, component, ParseAndGetConfigOrDie(), ctors...)
-}
-
-// MainWithConfig runs the generic main flow for non-webhook controllers. Use
-// WebhookMainWithConfig if you need to serve webhooks.
-func MainWithConfig(ctx context.Context, component string, cfg *rest.Config, ctors ...injection.ControllerConstructor) {
-	log.Printf("Registering %d clients", len(injection.Default.GetClients()))
-	log.Printf("Registering %d informer factories", len(injection.Default.GetInformerFactories()))
-	log.Printf("Registering %d informers", len(injection.Default.GetInformers()))
-	log.Printf("Registering %d controllers", len(ctors))
-
-	MemStatsOrDie(ctx)
-
-	// Adjust our client's rate limits based on the number of controllers we are running.
-	cfg.QPS = float32(len(ctors)) * rest.DefaultQPS
-	cfg.Burst = len(ctors) * rest.DefaultBurst
-	ctx = injection.WithConfig(ctx, cfg)
-
-	ctx, informers := injection.Default.SetupInformers(ctx, cfg)
+// Legacy aliases for back-compat.
+var (
+	WebhookMainWithContext = MainWithContext
+	WebhookMainWithConfig  = MainWithConfig
+)
 
-	logger, atomicLevel := SetupLoggerOrDie(ctx, component)
-	defer flush(logger)
-	ctx = logging.WithLogger(ctx, logger)
-	profilingHandler := profiling.NewHandler(logger, false)
-	profilingServer := profiling.NewServer(profilingHandler)
-	eg, egCtx := errgroup.WithContext(ctx)
-	eg.Go(profilingServer.ListenAndServe)
-	go func() {
-		// This will block until either a signal arrives or one of the grouped functions
-		// returns an error.
-		<-egCtx.Done()
-
-		profilingServer.Shutdown(context.Background())
-		if err := eg.Wait(); err != nil && err != http.ErrServerClosed {
-			logger.Errorw("Error while running server", zap.Error(err))
-		}
-	}()
-	CheckK8sClientMinimumVersionOrDie(ctx, logger)
+// MainWithContext runs the generic main flow for controllers and
+// webhooks with the given context.
+func MainWithContext(ctx context.Context, component string, ctors ...injection.ControllerConstructor) {
+
-	run := func(ctx context.Context) {
-		cmw := SetupConfigMapWatchOrDie(ctx, logger)
-		controllers, _ := ControllersAndWebhooksFromCtors(ctx, cmw, ctors...)
-		WatchLoggingConfigOrDie(ctx, cmw, logger, atomicLevel, component)
-		WatchObservabilityConfigOrDie(ctx, cmw, profilingHandler, logger, component)
+	// TODO(mattmoor): Remove this once HA is stable.
+	disableHighAvailability := flag.Bool("disable-ha", false,
+		"Whether to disable high-availability functionality for this component. This flag will be deprecated "+
+			"and removed when we have promoted this feature to stable, so do not pass it without filing an "+
+			"issue upstream!")
 
-		logger.Info("Starting configuration manager...")
-		if err := cmw.Start(ctx.Done()); err != nil {
-			logger.Fatalw("Failed to start configuration manager", zap.Error(err))
-		}
-		logger.Info("Starting informers...")
-		if err := controller.StartInformers(ctx.Done(), informers...); err != nil {
-			logger.Fatalw("Failed to start informers", zap.Error(err))
-		}
-		logger.Info("Starting controllers...")
-		go controller.StartAll(ctx, controllers...)
+	// HACK: This parses flags, so the above should be set once this runs.
+	cfg := ParseAndGetConfigOrDie()
 
-		<-ctx.Done()
+	if *disableHighAvailability {
+		ctx = WithHADisabled(ctx)
 	}
 
-	// Set up leader election config
-	leaderElectionConfig, err := GetLeaderElectionConfig(ctx)
-	if err != nil {
-		logger.Fatalw("Error loading leader election configuration", zap.Error(err))
-	}
-	leConfig := leaderElectionConfig.GetComponentConfig(component)
+	MainWithConfig(ctx, component, cfg, ctors...)
+}
 
-	if !leConfig.LeaderElect {
-		logger.Infof("%v will not run in leader-elected mode", component)
-		run(ctx)
-	} else {
-		RunLeaderElected(ctx, logger, run, leConfig)
-	}
+type haDisabledKey struct{}
+
+// WithHADisabled signals to MainWithConfig that it should not set up a leader elector for this component.
+func WithHADisabled(ctx context.Context) context.Context {
+	return context.WithValue(ctx, haDisabledKey{}, struct{}{})
 }
 
-// WebhookMainWithContext runs the generic main flow for controllers and
-// webhooks. Use MainWithContext if you do not need to serve webhooks.
-func WebhookMainWithContext(ctx context.Context, component string, ctors ...injection.ControllerConstructor) {
-	WebhookMainWithConfig(ctx, component, ParseAndGetConfigOrDie(), ctors...)
+// IsHADisabled checks whether leader election has been disabled on the given context.
+func IsHADisabled(ctx context.Context) bool {
+	return ctx.Value(haDisabledKey{}) != nil
 }
 
-// WebhookMainWithConfig runs the generic main flow for controllers and webhooks
-// with the given config. Use MainWithConfig if you do not need to serve
-// webhooks.
-func WebhookMainWithConfig(ctx context.Context, component string, cfg *rest.Config, ctors ...injection.ControllerConstructor) {
+// MainWithConfig runs the generic main flow for controllers and webhooks
+// with the given config.
+func MainWithConfig(ctx context.Context, component string, cfg *rest.Config, ctors ...injection.ControllerConstructor) {
 	log.Printf("Registering %d clients", len(injection.Default.GetClients()))
 	log.Printf("Registering %d informer factories", len(injection.Default.GetInformerFactories()))
 	log.Printf("Registering %d informers", len(injection.Default.GetInformers()))
@@ -238,10 +187,11 @@ func WebhookMainWithConfig(ctx context.Context, component string, cfg *rest.Conf
 	if err != nil {
 		logger.Fatalf("Error loading leader election configuration: %v", err)
 	}
-	leConfig := leaderElectionConfig.GetComponentConfig(component)
-	if leConfig.LeaderElect {
+
+	if !IsHADisabled(ctx) {
 		// Signal that we are executing in a context with leader election.
-		ctx = kle.WithDynamicLeaderElectorBuilder(ctx, kubeclient.Get(ctx), leConfig)
+		ctx = leaderelection.WithDynamicLeaderElectorBuilder(ctx, kubeclient.Get(ctx),
+			leaderElectionConfig.GetComponentConfig(component))
 	}
 
 	controllers, webhooks := ControllersAndWebhooksFromCtors(ctx, cmw, ctors...)
@@ -251,6 +201,14 @@ func WebhookMainWithConfig(ctx context.Context, component string, cfg *rest.Conf
 	eg, egCtx := errgroup.WithContext(ctx)
 	eg.Go(profilingServer.ListenAndServe)
 
+	// Many of the webhooks rely on configuration, e.g. configurable defaults, feature flags.
+	// So make sure that we have synchronized our configuration state before launching the
+	// webhooks, so that things are properly initialized.
+	logger.Info("Starting configuration manager...")
+	if err := cmw.Start(ctx.Done()); err != nil {
+		logger.Fatalw("Failed to start configuration manager", zap.Error(err))
+	}
+
 	// If we have one or more admission controllers, then start the webhook
 	// and pass them in.
 	var wh *webhook.Webhook
@@ -267,10 +225,6 @@ func WebhookMainWithConfig(ctx context.Context, component string, cfg *rest.Conf
 		})
 	}
 
-	logger.Info("Starting configuration manager...")
-	if err := cmw.Start(ctx.Done()); err != nil {
-		logger.Fatalw("Failed to start configuration manager", zap.Error(err))
-	}
 	logger.Info("Starting informers...")
 	if err := controller.StartInformers(ctx.Done(), informers...); err != nil {
 		logger.Fatalw("Failed to start informers", zap.Error(err))
@@ -414,7 +368,7 @@ func ControllersAndWebhooksFromCtors(ctx context.Context,
 	// Check whether the context has been infused with a leader elector builder.
 	// If it has, then every reconciler we plan to start MUST implement LeaderAware.
-	leEnabled := kle.HasLeaderElection(ctx)
+	leEnabled := leaderelection.HasLeaderElection(ctx)
 
 	controllers := make([]*controller.Impl, 0, len(ctors))
 	webhooks := make([]interface{}, 0)
@@ -437,66 +391,3 @@ func ControllersAndWebhooksFromCtors(ctx context.Context,
 
 	return controllers, webhooks
 }
-
-// RunLeaderElected runs the given function in leader elected mode. The function
-// will be run only once the leader election lock is obtained.
-func RunLeaderElected(ctx context.Context, logger *zap.SugaredLogger, run func(context.Context), leConfig kle.ComponentConfig) {
-	recorder := controller.GetEventRecorder(ctx)
-	if recorder == nil {
-		// Create event broadcaster
-		logger.Debug("Creating event broadcaster")
-		eventBroadcaster := record.NewBroadcaster()
-		watches := []watch.Interface{
-			eventBroadcaster.StartLogging(logger.Named("event-broadcaster").Infof),
-			eventBroadcaster.StartRecordingToSink(
-				&typedcorev1.EventSinkImpl{Interface: kubeclient.Get(ctx).CoreV1().Events(system.Namespace())}),
-		}
-		recorder = eventBroadcaster.NewRecorder(
-			scheme.Scheme, corev1.EventSource{Component: leConfig.Component})
-		go func() {
-			<-ctx.Done()
-			for _, w := range watches {
-				w.Stop()
-			}
-		}()
-	}
-
-	// Create a unique identifier so that two controllers on the same host don't
-	// race.
-	id, err := kle.UniqueID()
-	if err != nil {
-		logger.Fatalw("Failed to get unique ID for leader election", zap.Error(err))
-	}
-	logger.Infof("%v will run in leader-elected mode with id %v", leConfig.Component, id)
-
-	// rl is the resource used to hold the leader election lock.
-	rl, err := resourcelock.New(leConfig.ResourceLock,
-		system.Namespace(), // use namespace we are running in
-		leConfig.Component, // component is used as the resource name
-		kubeclient.Get(ctx).CoreV1(),
-		kubeclient.Get(ctx).CoordinationV1(),
-		resourcelock.ResourceLockConfig{
-			Identity:      id,
-			EventRecorder: recorder,
-		})
-	if err != nil {
-		logger.Fatalw("Error creating lock", zap.Error(err))
-	}
-
-	// Execute the `run` function when we have the lock.
-	leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
-		Lock:          rl,
-		LeaseDuration: leConfig.LeaseDuration,
-		RenewDeadline: leConfig.RenewDeadline,
-		RetryPeriod:   leConfig.RetryPeriod,
-		Callbacks: leaderelection.LeaderCallbacks{
-			OnStartedLeading: run,
-			OnStoppedLeading: func() {
-				logger.Fatal("Leader election lost")
-			},
-		},
-		ReleaseOnCancel: true,
-		// TODO: use health check watchdog, knative/pkg#1048
-		Name: leConfig.Component,
-	})
-}
diff --git a/vendor/knative.dev/pkg/leaderelection/config.go b/vendor/knative.dev/pkg/leaderelection/config.go
index 880dd8c2b988..b694c4fd206b 100644
--- a/vendor/knative.dev/pkg/leaderelection/config.go
+++ b/vendor/knative.dev/pkg/leaderelection/config.go
@@ -52,11 +52,6 @@ func NewConfigFromMap(data map[string]string) (*Config, error) {
 		cm.AsDuration("retryPeriod", &config.RetryPeriod),
 
 		cm.AsUint32("buckets", &config.Buckets),
-
-		// enabledComponents are not validated here, because they are dependent on
-		// the component. Components should provide additional validation for this
-		// field.
-		cm.AsStringSet("enabledComponents", &config.EnabledComponents),
 	); err != nil {
 		return nil, err
 	}
@@ -84,45 +79,42 @@ func NewConfigFromConfigMap(configMap *corev1.ConfigMap) (*Config, error) {
 // contained within a single namespace. Typically these will correspond to a
 // single source repository, viz: serving or eventing.
 type Config struct {
-	ResourceLock      string
-	Buckets           uint32
-	LeaseDuration     time.Duration
-	RenewDeadline     time.Duration
-	RetryPeriod       time.Duration
+	ResourceLock  string
+	Buckets       uint32
+	LeaseDuration time.Duration
+	RenewDeadline time.Duration
+	RetryPeriod   time.Duration
+
+	// This field is deprecated and will be removed once downstream
+	// repositories have removed their validation of it.
+	// TODO(https://github.com/knative/pkg/issues/1478): Remove this field.
 	EnabledComponents sets.String
 }
 
 func (c *Config) GetComponentConfig(name string) ComponentConfig {
-	if c.EnabledComponents.Has(name) {
-		return ComponentConfig{
-			Component:     name,
-			LeaderElect:   true,
-			Buckets:       c.Buckets,
-			ResourceLock:  c.ResourceLock,
-			LeaseDuration: c.LeaseDuration,
-			RenewDeadline: c.RenewDeadline,
-			RetryPeriod:   c.RetryPeriod,
-		}
+	return ComponentConfig{
+		Component:     name,
+		Buckets:       c.Buckets,
+		ResourceLock:  c.ResourceLock,
+		LeaseDuration: c.LeaseDuration,
+		RenewDeadline: c.RenewDeadline,
+		RetryPeriod:   c.RetryPeriod,
 	}
-
-	return defaultComponentConfig(name)
 }
 
 func defaultConfig() *Config {
 	return &Config{
-		ResourceLock:      "leases",
-		Buckets:           1,
-		LeaseDuration:     15 * time.Second,
-		RenewDeadline:     10 * time.Second,
-		RetryPeriod:       2 * time.Second,
-		EnabledComponents: sets.NewString(),
+		ResourceLock:  "leases",
+		Buckets:       1,
+		LeaseDuration: 15 * time.Second,
+		RenewDeadline: 10 * time.Second,
+		RetryPeriod:   2 * time.Second,
 	}
 }
 
 // ComponentConfig represents the leader election config for a single component.
 type ComponentConfig struct {
 	Component     string
-	LeaderElect   bool
 	Buckets       uint32
 	ResourceLock  string
 	LeaseDuration time.Duration
@@ -165,13 +157,6 @@ func newStatefulSetConfig() (*statefulSetConfig, error) {
 	return ssc, nil
 }
 
-func defaultComponentConfig(name string) ComponentConfig {
-	return ComponentConfig{
-		Component:   name,
-		LeaderElect: false,
-	}
-}
-
 // ConfigMapName returns the name of the configmap to read for leader election
 // settings.
 func ConfigMapName() string {
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 42758046f958..156e0ad39d18 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -987,7 +987,7 @@ k8s.io/utils/buffer
 k8s.io/utils/integer
 k8s.io/utils/pointer
 k8s.io/utils/trace
-# knative.dev/caching v0.0.0-20200707200344-95a2aaeace0f
+# knative.dev/caching v0.0.0-20200713162518-90ce4328c69e
 ## explicit
 knative.dev/caching/config
 knative.dev/caching/pkg/apis/caching
@@ -1008,7 +1008,7 @@ knative.dev/caching/pkg/client/injection/informers/caching/v1alpha1/image/fake
 knative.dev/caching/pkg/client/injection/informers/factory
 knative.dev/caching/pkg/client/injection/informers/factory/fake
 knative.dev/caching/pkg/client/listers/caching/v1alpha1
-# knative.dev/networking v0.0.0-20200707203944-725ec013d8a2
+# knative.dev/networking v0.0.0-20200713162319-e2731eead7e8
 ## explicit
 knative.dev/networking/pkg/apis/networking
 knative.dev/networking/pkg/apis/networking/v1alpha1
@@ -1034,7 +1034,7 @@ knative.dev/networking/pkg/client/injection/informers/networking/v1alpha1/server
 knative.dev/networking/pkg/client/injection/reconciler/networking/v1alpha1/serverlessservice
 knative.dev/networking/pkg/client/istio/listers/networking/v1alpha3
 knative.dev/networking/pkg/client/listers/networking/v1alpha1
-# knative.dev/pkg v0.0.0-20200713031612-b09a159e12c9
+# knative.dev/pkg v0.0.0-20200713194318-a81727701f66
 ## explicit
 knative.dev/pkg/apiextensions/storageversion
 knative.dev/pkg/apiextensions/storageversion/cmd/migrate
@@ -1155,7 +1155,7 @@ knative.dev/pkg/webhook/resourcesemantics/conversion
 knative.dev/pkg/webhook/resourcesemantics/defaulting
 knative.dev/pkg/webhook/resourcesemantics/validation
 knative.dev/pkg/websocket
-# knative.dev/test-infra v0.0.0-20200713045417-850e4e37918d
+# knative.dev/test-infra v0.0.0-20200713185018-6b52776d44a4
 ## explicit
 knative.dev/test-infra/scripts
 # sigs.k8s.io/yaml v1.2.0
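
Adoption note: the cmd/controller change above doubles as the pattern downstream binaries are expected to follow — hoist the constructors into a package-level slice so a unit test (like cmd/controller/main_test.go) can pin the reconciler count that the HA tests multiply by the bucket count when counting leader Leases. A minimal sketch of that wiring follows; the example.com/sample reconciler packages and their NewController constructors are hypothetical placeholders, while injection, sharedmain, and the -disable-ha flag are the real APIs touched by this diff.

package main

import (
	"knative.dev/pkg/injection"
	"knative.dev/pkg/injection/sharedmain"

	// Hypothetical reconciler packages; substitute your own constructors.
	bar "example.com/sample/pkg/reconciler/bar"
	foo "example.com/sample/pkg/reconciler/foo"
)

// ctors is the single source of truth for how many reconcilers this binary
// runs; a companion test can assert len(ctors) against the constant used by
// the HA tests to compute the expected number of leader Leases
// (reconcilers x buckets).
var ctors = []injection.ControllerConstructor{
	foo.NewController,
	bar.NewController,
}

func main() {
	// With this change sharedmain always sets up sharded leader election;
	// start the binary with -disable-ha to opt out while the feature is
	// being stabilized.
	sharedmain.Main("sample-controller", ctors...)
}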