diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index c9c4451..0716a75 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -3,9 +3,12 @@ name: Tests on: push: branches: - - '**' + - main pull_request: + # Allow to run this workflow manually from the Actions tab + workflow_dispatch: + permissions: contents: read @@ -47,6 +50,10 @@ jobs: env: CLOUDSCALE_API_TOKEN: ${{ secrets.CLOUDSCALE_API_TOKEN }} + # Prevent integration tests from running in parallel. + concurrency: + group: integration + steps: - uses: actions/checkout@v3 - uses: actions/setup-go@v4 diff --git a/.gitignore b/.gitignore index f5ed48e..6528729 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,4 @@ *.iml cover.out k8test +bin/ \ No newline at end of file diff --git a/.golangci.yml b/.golangci.yml new file mode 100644 index 0000000..1cf851f --- /dev/null +++ b/.golangci.yml @@ -0,0 +1,27 @@ +linters: + disable-all: true + enable: + - errcheck + - exportloopref + - gocritic + - gofmt + - gosimple + - govet + - ineffassign + - lll + - makezero + - staticcheck + - unparam + - unused + - wrapcheck + +linters-settings: + lll: + line-length: 80 + tab-width: 4 + +issues: + exclude-rules: + - linters: + - lll + source: ".+LoadBalancer|// http(s)://" diff --git a/Dockerfile b/Dockerfile index 6bceb09..e38fae1 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.20-alpine AS build +FROM golang:1.21-alpine AS build ARG VERSION RUN apk add --no-cache git diff --git a/Makefile b/Makefile index 30fabbc..b2d0c06 100644 --- a/Makefile +++ b/Makefile @@ -9,7 +9,12 @@ lint: staticcheck ./... 
test: - go test -race -coverpkg=./pkg/cloudscale_ccm -coverprofile cover.out ./pkg/cloudscale_ccm -v + go test -race -v \ + -coverpkg=./pkg/cloudscale_ccm,./pkg/internal/actions,./pkg/internal/compare \ + -coverprofile cover.out \ + ./pkg/cloudscale_ccm \ + ./pkg/internal/actions \ + ./pkg/internal/compare integration: K8TEST_PATH=${PWD}/k8test go test -count=1 -tags=integration ./pkg/internal/integration -v diff --git a/cmd/cloudscale-cloud-controller-manager/main.go b/cmd/cloudscale-cloud-controller-manager/main.go index 6670255..ec4b055 100644 --- a/cmd/cloudscale-cloud-controller-manager/main.go +++ b/cmd/cloudscale-cloud-controller-manager/main.go @@ -9,6 +9,7 @@ import ( cloudprovider "k8s.io/cloud-provider" "k8s.io/cloud-provider/app" "k8s.io/cloud-provider/app/config" + "k8s.io/cloud-provider/names" "k8s.io/cloud-provider/options" "k8s.io/component-base/cli" cliflag "k8s.io/component-base/cli/flag" @@ -32,8 +33,10 @@ func main() { ccmOptions, cloudInitializer, app.DefaultInitFuncConstructors, + names.CCMControllerAliases(), cliflag.NamedFlagSets{}, - wait.NeverStop) + wait.NeverStop, + ) os.Exit(cli.Run(cmd)) } diff --git a/deploy/kubernetes/releases/latest.yml b/deploy/kubernetes/releases/latest.yml index 56cbdca..8e173cd 100644 --- a/deploy/kubernetes/releases/latest.yml +++ b/deploy/kubernetes/releases/latest.yml @@ -32,6 +32,15 @@ rules: - patch - update - watch +- apiGroups: + - "" + resources: + - services/status + verbs: + - list + - patch + - update + - watch - apiGroups: - "" resources: @@ -131,6 +140,7 @@ spec: - "cloudscale-cloud-controller-manager" - "--allow-untagged-cloud" - "--v=3" + - "--concurrent-service-syncs=2" env: - name: CLOUDSCALE_API_URL value: https://api.cloudscale.ch/ diff --git a/go.mod b/go.mod index 58fc38f..5128a15 100644 --- a/go.mod +++ b/go.mod @@ -3,18 +3,19 @@ module github.com/cloudscale-ch/cloudscale-cloud-controller-manager go 1.21 require ( - github.com/cloudscale-ch/cloudscale-go-sdk v1.11.0 golang.org/x/oauth2 
v0.9.0 - k8s.io/api v0.27.3 - k8s.io/apimachinery v0.27.3 - k8s.io/cloud-provider v0.27.3 - k8s.io/component-base v0.27.3 + k8s.io/api v0.28.2 + k8s.io/apimachinery v0.28.2 + k8s.io/cloud-provider v0.28.2 + k8s.io/component-base v0.28.2 k8s.io/klog/v2 v2.100.1 ) require ( + github.com/cloudscale-ch/cloudscale-go-sdk/v4 v4.0.0 github.com/google/uuid v1.3.0 github.com/stretchr/testify v1.8.3 + k8s.io/client-go v0.28.2 ) require ( @@ -42,8 +43,8 @@ require ( github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect - github.com/google/cel-go v0.16.0 // indirect - github.com/google/gnostic v0.6.9 // indirect + github.com/google/cel-go v0.16.1 // indirect + github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-cmp v0.5.9 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect @@ -54,7 +55,6 @@ require ( github.com/json-iterator/go v1.1.12 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect - github.com/mitchellh/mapstructure v1.4.1 // indirect github.com/moby/term v0.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect @@ -84,13 +84,13 @@ require ( go.uber.org/atomic v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.24.0 // indirect - golang.org/x/crypto v0.10.0 // indirect + golang.org/x/crypto v0.14.0 // indirect golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 // indirect - golang.org/x/net v0.11.0 // indirect + golang.org/x/net v0.17.0 // indirect golang.org/x/sync v0.3.0 // indirect - golang.org/x/sys v0.9.0 // indirect - golang.org/x/term v0.9.0 // indirect - golang.org/x/text v0.10.0 // indirect + golang.org/x/sys v0.13.0 // indirect + golang.org/x/term v0.13.0 // indirect + 
golang.org/x/text v0.13.0 // indirect golang.org/x/time v0.3.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc // indirect @@ -102,12 +102,11 @@ require ( gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiserver v0.27.3 // indirect - k8s.io/client-go v0.27.3 // indirect - k8s.io/component-helpers v0.27.3 // indirect - k8s.io/controller-manager v0.27.3 // indirect - k8s.io/kms v0.27.3 // indirect - k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f // indirect + k8s.io/apiserver v0.28.2 // indirect + k8s.io/component-helpers v0.28.2 // indirect + k8s.io/controller-manager v0.28.2 // indirect + k8s.io/kms v0.28.2 // indirect + k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect k8s.io/utils v0.0.0-20230505201702-9f6742963106 // indirect sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.3 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect diff --git a/go.sum b/go.sum index 2c62278..bfcda8f 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,3 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys= cloud.google.com/go/compute v1.19.1 h1:am86mquDUgjGNWxiGn+5PGLbmgiWXlE/yNWpIpNvuXY= cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= @@ -7,11 +5,8 @@ cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGB cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod 
h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18= github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= @@ -22,20 +17,12 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= -github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= -github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= 
-github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudscale-ch/cloudscale-go-sdk v1.11.0 h1:V13SCwWRuVWaoWb2u9LLdfNZ9/0hTTC+uW8cwvvvHvc= -github.com/cloudscale-ch/cloudscale-go-sdk v1.11.0/go.mod h1:FhOTOCgKAVvRRMQc1mC0D7xK/3zYnmcZBWFXNkacvMc= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cloudscale-ch/cloudscale-go-sdk/v4 v4.0.0 h1:sB3uRgv3UVAdnpGM2ZxR6H5XyCbLHLRmaLheq8drRt4= +github.com/cloudscale-ch/cloudscale-go-sdk/v4 v4.0.0/go.mod h1:FfL+Dw1kbt/iutAP/vGqJV+1nu6azcKkYexjYIswDVw= github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= @@ -49,27 +36,18 @@ github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= -github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= 
github.com/emicklei/go-restful/v3 v3.10.2 h1:hIovbnmBTLjHXkqEBUz3HGpXZdM7ZrE9fJIZIqlJLqE= github.com/emicklei/go-restful/v3 v3.10.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.10.1 h1:c0g45+xCJhdgFGw7a5QAfdS4byAbud7miNWJ1WwEVf8= github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.2/go.mod 
h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= @@ -85,46 +63,28 @@ github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= -github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= -github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt/v4 v4.4.2 h1:rcc4lwaZgFMCZ5jxF9ABolDcIHdBytAFgqFPbSJQAYs= -github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= +github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache 
v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= -github.com/google/cel-go v0.16.0 h1:DG9YQ8nFCFXAs/FDDwBxmL1tpKNrdlGUM9U3537bX/Y= -github.com/google/cel-go v0.16.0/go.mod 
h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY= -github.com/google/gnostic v0.6.9 h1:ZK/5VhkoX835RikCHpSUJV9a+S3e1zLh59YnyWeBW+0= -github.com/google/gnostic v0.6.9/go.mod h1:Nm8234We1lq6iB9OmlgNv3nH91XLLVZHCDayfA3xq+E= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/cel-go v0.16.1 h1:3hZfSNiAU3KOiNtxuFXVp5WFy4hf/Ly3Sa4/7F8SXNo= +github.com/google/cel-go v0.16.1/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -133,7 +93,6 @@ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/websocket v1.4.2 
h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= @@ -158,7 +117,6 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= @@ -170,8 +128,6 @@ github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0 github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag= -github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -181,24 +137,22 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod 
h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/onsi/ginkgo/v2 v2.9.1 h1:zie5Ly042PD3bsCvsSOPvRnFwyo3rKe64TJlD6nu0mk= -github.com/onsi/ginkgo/v2 v2.9.1/go.mod h1:FEcmzVcCHl+4o9bQZVab+4dC9+j+91t2FHSzmGAPfuo= -github.com/onsi/gomega v1.27.4 h1:Z2AnStgsdSayCMDiCU42qIz+HLqEPcgiOCXjAU/w+8E= -github.com/onsi/gomega v1.27.4/go.mod h1:riYq/GJKh8hhoM01HN6Vmuy93AarCXCBGpvFDK3q3fQ= +github.com/onsi/ginkgo/v2 v2.9.4 h1:xR7vG4IXt5RWx6FfIjyAtsoMAtnc3C/rFXBBd2AjZwE= +github.com/onsi/ginkgo/v2 v2.9.4/go.mod h1:gCQYp2Q+kSoIj7ykSVb9nskRSsR6PUj4AiLywzIhbKM= +github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE= +github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= github.com/prometheus/procfs v0.11.0 h1:5EAgkfkMl659uZPbe9AS2N68a7Cc1TJbPEuGzFuRbyk= github.com/prometheus/procfs v0.11.0/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= -github.com/rogpeppe/fastuuid v1.2.0/go.mod 
h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= @@ -206,19 +160,16 @@ github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0 github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs= github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod 
h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= @@ -226,29 +177,26 @@ github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gt github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE= github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk= -github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= -github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= -go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= +go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ= +go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= go.etcd.io/etcd/api/v3 v3.5.9 h1:4wSsluwyTbGGmyjJktOf3wFQoTBIURXHnq9n/G/JQHs= go.etcd.io/etcd/api/v3 v3.5.9/go.mod h1:uyAal843mC8uUVSLWz6eHa/d971iDGnCRpmKd2Z+X8k= go.etcd.io/etcd/client/pkg/v3 v3.5.9 h1:oidDC4+YEuSIQbsR94rY9gur91UPL6DnxDCIYd2IGsE= go.etcd.io/etcd/client/pkg/v3 v3.5.9/go.mod h1:y+CzeSmkMpWN2Jyu1npecjB9BBnABxGM4pN8cGuJeL4= -go.etcd.io/etcd/client/v2 
v2.305.7 h1:AELPkjNR3/igjbO7CjyF1fPuVPjrblliiKj+Y6xSGOU= -go.etcd.io/etcd/client/v2 v2.305.7/go.mod h1:GQGT5Z3TBuAQGvgPfhR7VPySu/SudxmEkRq9BgzFU6s= +go.etcd.io/etcd/client/v2 v2.305.9 h1:YZ2OLi0OvR0H75AcgSUajjd5uqKDKocQUqROTG11jIo= +go.etcd.io/etcd/client/v2 v2.305.9/go.mod h1:0NBdNx9wbxtEQLwAQtrDHwx58m02vXpDcgSYI2seohQ= go.etcd.io/etcd/client/v3 v3.5.9 h1:r5xghnU7CwbUxD/fbUtRyJGaYNfDun8sp/gTr1hew6E= go.etcd.io/etcd/client/v3 v3.5.9/go.mod h1:i/Eo5LrZ5IKqpbtpPDuaUnDOUv471oDg8cjQaUr2MbA= -go.etcd.io/etcd/pkg/v3 v3.5.7 h1:obOzeVwerFwZ9trMWapU/VjDcYUJb5OfgC1zqEGWO/0= -go.etcd.io/etcd/pkg/v3 v3.5.7/go.mod h1:kcOfWt3Ov9zgYdOiJ/o1Y9zFfLhQjylTgL4Lru8opRo= -go.etcd.io/etcd/raft/v3 v3.5.7 h1:aN79qxLmV3SvIq84aNTliYGmjwsW6NqJSnqmI1HLJKc= -go.etcd.io/etcd/raft/v3 v3.5.7/go.mod h1:TflkAb/8Uy6JFBxcRaH2Fr6Slm9mCPVdI2efzxY96yU= -go.etcd.io/etcd/server/v3 v3.5.7 h1:BTBD8IJUV7YFgsczZMHhMTS67XuA4KpRquL0MFOJGRk= -go.etcd.io/etcd/server/v3 v3.5.7/go.mod h1:gxBgT84issUVBRpZ3XkW1T55NjOb4vZZRI4wVvNhf4A= +go.etcd.io/etcd/pkg/v3 v3.5.9 h1:6R2jg/aWd/zB9+9JxmijDKStGJAPFsX3e6BeJkMi6eQ= +go.etcd.io/etcd/pkg/v3 v3.5.9/go.mod h1:BZl0SAShQFk0IpLWR78T/+pyt8AruMHhTNNX73hkNVY= +go.etcd.io/etcd/raft/v3 v3.5.9 h1:ZZ1GIHoUlHsn0QVqiRysAm3/81Xx7+i2d7nSdWxlOiI= +go.etcd.io/etcd/raft/v3 v3.5.9/go.mod h1:WnFkqzFdZua4LVlVXQEGhmooLeyS7mqzS4Pf4BCVqXg= +go.etcd.io/etcd/server/v3 v3.5.9 h1:vomEmmxeztLtS5OEH7d0hBAg4cjVIu9wXuNzUZx2ZA0= +go.etcd.io/etcd/server/v3 v3.5.9/go.mod h1:GgI1fQClQCFIzuVjlvdbMxNbnISt90gdfYyqiAIt65g= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.0 h1:ZOLJc06r4CB42laIXg/7udr0pbZyuAihN10A/XuiQRY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.0/go.mod h1:5z+/ZWJQKXa9YT34fQNx5K8Hd1EoIhvtUygUQPqEOgQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0 h1:pginetY7+onl4qN1vl0xW/V/v6OBZ0vVdH+esuJgvmM= @@ -267,7 +215,6 @@ go.opentelemetry.io/otel/sdk v1.16.0 h1:Z1Ok1YsijYL0CSJpHt4cS3wDDh7p572grzNrBMiM 
go.opentelemetry.io/otel/sdk v1.16.0/go.mod h1:tMsIuKXuuIWPBAOrH+eHtvhTL+SntFtXF9QD68aP6p4= go.opentelemetry.io/otel/trace v1.16.0 h1:8JRpaObFoW0pxuVPapkgH8UhHQj+bJW8jJsCZEu5MQs= go.opentelemetry.io/otel/trace v1.16.0/go.mod h1:Yt9vYq1SdNz3xdjZZK7wcXv1qv2pwLkqr2QVwea0ef0= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.20.0 h1:BLOA1cZBAGSbRiNuGCCKiFrCdYB7deeHDeD1SueyOfA= go.opentelemetry.io/proto/otlp v0.20.0/go.mod h1:3QgjzPALBIv9pcknj2EXGPXjYPFdUh/RQfF8Lz3+Vnw= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= @@ -281,162 +228,102 @@ go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.10.0 h1:LKqV2xt9+kDzSTfOhx4FrkEBcMrAgHSYgzywV9zcGmM= -golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 h1:k/i9J1pBpvlfR+9QsetwPyERsqu1GIbi967PQMq3Ivc= golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod 
h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.11.0 h1:Gi2tvZIJyBtO9SDr1q9h5hEQCp/4L2RQ+ar0qjx2oNU= -golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190517181255-950ef44c6e07/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= 
-golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 v0.9.0 h1:BPpt2kU7oMRq3kCHAA1tbSEshXRw1LpG2ztgDwrzuAs= golang.org/x/oauth2 v0.9.0/go.mod h1:qYgFZaFiu6Wg24azG8bdV52QJXJGbZzIIsRCdVKzbLw= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s= -golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.9.0 h1:GRRCnKYhdQrD8kfRAdQ6Zcw1P0OcELxGLKJvtjVMZ28= -golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.10.0 h1:UpjohKhiEgNc0CSauXmwYftY1+LlaC75SJwh0SgCX58= -golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4= -golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= +golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y= +golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod 
h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc h1:8DyZCyvI8mE1IdLy/60bS+52xfymkE72wv1asokgtao= google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:xZnkP7mREFX5MORlOPEzLMr+90PPZQ2QWzrVTWfAq64= google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc h1:kVKPf/IiYSBWEWtkIn6wZXwWGCnLKcC8oWfZvXjsGnM= google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc h1:XSJ8Vk1SWuNr8S18z1NZSziL0CPIXLCCMDOEFtHBOFc= google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= 
google.golang.org/grpc v1.56.0 h1:+y7Bs8rtMd07LeXmL3NxcTLn7mUkbKZqEpPhMNkwJEE= google.golang.org/grpc v1.56.0/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= 
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -k8s.io/api v0.27.3 h1:yR6oQXXnUEBWEWcvPWS0jQL575KoAboQPfJAuKNrw5Y= -k8s.io/api v0.27.3/go.mod h1:C4BNvZnQOF7JA/0Xed2S+aUyJSfTGkGFxLXz9MnpIpg= -k8s.io/apimachinery v0.27.3 h1:Ubye8oBufD04l9QnNtW05idcOe9Z3GQN8+7PqmuVcUM= -k8s.io/apimachinery v0.27.3/go.mod h1:XNfZ6xklnMCOGGFNqXG7bUrQCoR04dh/E7FprV6pb+E= -k8s.io/apiserver v0.27.3 h1:AxLvq9JYtveYWK+D/Dz/uoPCfz8JC9asR5z7+I/bbQ4= -k8s.io/apiserver v0.27.3/go.mod h1:Y61+EaBMVWUBJtxD5//cZ48cHZbQD+yIyV/4iEBhhNA= -k8s.io/client-go v0.27.3 h1:7dnEGHZEJld3lYwxvLl7WoehK6lAq7GvgjxpA3nv1E8= -k8s.io/client-go v0.27.3/go.mod h1:2MBEKuTo6V1lbKy3z1euEGnhPfGZLKTS9tiJ2xodM48= -k8s.io/cloud-provider v0.27.3 
h1:YylqJpKCB3O2MRnNXshxSVOQTOZE4I0G+cnyOfLwkGA= -k8s.io/cloud-provider v0.27.3/go.mod h1:+C4rgsL3O0pxXdjoxRDOjCzNTj4C6jYUmK2OyogK1Jw= -k8s.io/component-base v0.27.3 h1:g078YmdcdTfrCE4fFobt7qmVXwS8J/3cI1XxRi/2+6k= -k8s.io/component-base v0.27.3/go.mod h1:JNiKYcGImpQ44iwSYs6dysxzR9SxIIgQalk4HaCNVUY= -k8s.io/component-helpers v0.27.3 h1:oK7+AlwBKsSUIIRC5Vv8/4HEtmgzXNQD+zLbsOUwVso= -k8s.io/component-helpers v0.27.3/go.mod h1:uxhXqoWHh4eBVcPj+LKWjtQq0V/vP5ihn4xmf5xNZso= -k8s.io/controller-manager v0.27.3 h1:tw1zoCi8ylYXoyImThlPkmdo9wQDtyhAojrjWdfBv/E= -k8s.io/controller-manager v0.27.3/go.mod h1:dH5WQMqZOTHZdY8sTQRv1RkZRibaaDx7sncvejUUICc= +k8s.io/api v0.28.2 h1:9mpl5mOb6vXZvqbQmankOfPIGiudghwCoLl1EYfUZbw= +k8s.io/api v0.28.2/go.mod h1:RVnJBsjU8tcMq7C3iaRSGMeaKt2TWEUXcpIt/90fjEg= +k8s.io/apimachinery v0.28.2 h1:KCOJLrc6gu+wV1BYgwik4AF4vXOlVJPdiqn0yAWWwXQ= +k8s.io/apimachinery v0.28.2/go.mod h1:RdzF87y/ngqk9H4z3EL2Rppv5jj95vGS/HaFXrLDApU= +k8s.io/apiserver v0.28.2 h1:rBeYkLvF94Nku9XfXyUIirsVzCzJBs6jMn3NWeHieyI= +k8s.io/apiserver v0.28.2/go.mod h1:f7D5e8wH8MWcKD7azq6Csw9UN+CjdtXIVQUyUhrtb+E= +k8s.io/client-go v0.28.2 h1:DNoYI1vGq0slMBN/SWKMZMw0Rq+0EQW6/AK4v9+3VeY= +k8s.io/client-go v0.28.2/go.mod h1:sMkApowspLuc7omj1FOSUxSoqjr+d5Q0Yc0LOFnYFJY= +k8s.io/cloud-provider v0.28.2 h1:9qsYm86hm4bnPgZbl9LE29Zfgjuq3NZR2dgtPioJ40s= +k8s.io/cloud-provider v0.28.2/go.mod h1:40fqf6MtgYho5Eu4gkyLgh5abxU/QKTMTIwBxt4ILyU= +k8s.io/component-base v0.28.2 h1:Yc1yU+6AQSlpJZyvehm/NkJBII72rzlEsd6MkBQ+G0E= +k8s.io/component-base v0.28.2/go.mod h1:4IuQPQviQCg3du4si8GpMrhAIegxpsgPngPRR/zWpzc= +k8s.io/component-helpers v0.28.2 h1:r/XJ265PMirW9EcGXr/F+2yWrLPo2I69KdvcY/h9HAo= +k8s.io/component-helpers v0.28.2/go.mod h1:pF1R5YWQ+sgf0i6EbVm+MQCzkYuqutDUibdrkvAa6aI= +k8s.io/controller-manager v0.28.2 h1:C2RKx+NH3Iw+4yLdTGNJlYUd4cRV1N8tKl4XfqMwuTk= +k8s.io/controller-manager v0.28.2/go.mod h1:7bT6FlTE96Co7QevCtvcVnZZIJSaGj6F7EmyT2Rf3GY= k8s.io/klog/v2 v2.100.1 
h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kms v0.27.3 h1:O6mZqi647ZLmxxkEv5Q9jMlmcXOh42CBD+A3MxI6zaQ= -k8s.io/kms v0.27.3/go.mod h1:VDfnSIK0dk5J+jasbe+kKpb3CQVwlcDeBLyq59P2KyY= -k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f h1:2kWPakN3i/k81b0gvD5C5FJ2kxm1WrQFanWchyKuqGg= -k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f/go.mod h1:byini6yhqGC14c3ebc/QwanvYwhuMWF6yz2F8uwW8eg= +k8s.io/kms v0.28.2 h1:KhG63LHopCdzs1oKA1j+NWleuIXudgOyCqJo4yi3GaM= +k8s.io/kms v0.28.2/go.mod h1:iAjgIqBrV2+8kmsjbbgUkAyKSuYq5g1dW9knpt6OhaE= +k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ= +k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= k8s.io/utils v0.0.0-20230505201702-9f6742963106 h1:EObNQ3TW2D+WptiYXlApGNLVy0zm/JIBVY9i+M4wpAU= k8s.io/utils v0.0.0-20230505201702-9f6742963106/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.3 h1:I3qQxpzWFcsU7IV/MENc5x125HxRtchsNPtE6Pu+bBc= diff --git a/helpers/run-in-test-cluster b/helpers/run-in-test-cluster index 395009b..01e175a 100755 --- a/helpers/run-in-test-cluster +++ b/helpers/run-in-test-cluster @@ -6,6 +6,7 @@ set -euo pipefail export ANSIBLE_CONFIG="$PWD"/k8test/ansible.cfg +export KUBERNETES="${KUBERNETES-latest}" # Prepares k8test with an existing virtual env, or a newly created on function ensure-k8test() { @@ -71,7 +72,8 @@ function ensure-inventory() { -e ssh_key=k8test/cluster/ssh.pub \ -e control_count=2 \ -e worker_count=2 \ - -e kubelet_extra_args='--cloud-provider=external' + -e kubelet_extra_args='--cloud-provider=external' \ + -e kubernetes="${KUBERNETES}" # Those won't really change between runs, so update them during install k8test/playbooks/update-secrets.yml \ diff --git a/pkg/cloudscale_ccm/cloud.go b/pkg/cloudscale_ccm/cloud.go index 
7103021..ee53f92 100644 --- a/pkg/cloudscale_ccm/cloud.go +++ b/pkg/cloudscale_ccm/cloud.go @@ -9,30 +9,27 @@ import ( "strings" "time" - "github.com/cloudscale-ch/cloudscale-go-sdk" + cloudscale "github.com/cloudscale-ch/cloudscale-go-sdk/v4" "golang.org/x/oauth2" + "k8s.io/client-go/kubernetes" "k8s.io/klog/v2" cloudprovider "k8s.io/cloud-provider" ) const ( - // Under no circumstances can this string change. It is for eterentiy. + // Under no circumstances can this string change. It is for eternity. ProviderName = "cloudscale" AccessToken = "CLOUDSCALE_ACCESS_TOKEN" ApiUrl = "CLOUDSCALE_API_URL" ApiTimeout = "CLOUDSCALE_API_TIMEOUT" - DefaultTimeout = time.Duration(5) * time.Second + DefaultTimeout = time.Duration(20) * time.Second ) // cloud implements cloudprovider.Interface type cloud struct { - // timeout used for the API access (informational only, changing it does - // not influence the active API client) - timeout time.Duration - - // CCM endpoints - instances *instances + instances *instances + loadbalancer *loadbalancer } // Register this provider with Kubernetes @@ -67,26 +64,22 @@ func newCloudscaleProvider(config io.Reader) (cloudprovider.Interface, error) { return nil, fmt.Errorf("no %s configured", AccessToken) } - // Always use a sensible timeout for operations, as the default is ∞ - timeout := func() time.Duration { - if seconds, _ := strconv.Atoi(os.Getenv(ApiTimeout)); seconds > 0 { - return time.Duration(seconds) * time.Second - } - return 5 * time.Second - }() - - client := newCloudscaleClient(token, timeout) + client := newCloudscaleClient(token, apiTimeout()) return &cloud{ - timeout: apiTimeout(), instances: &instances{ srv: serverMapper{client: client}, }, + loadbalancer: &loadbalancer{ + lbs: lbMapper{client: client}, + srv: serverMapper{client: client}, + }, }, nil } // newCloudscaleClient spawns a new cloudscale API client -func newCloudscaleClient(token string, timeout time.Duration) *cloudscale.Client { +func newCloudscaleClient( + 
token string, timeout time.Duration) *cloudscale.Client { tokenSource := oauth2.StaticTokenSource(&oauth2.Token{ AccessToken: token, @@ -105,53 +98,68 @@ func newCloudscaleClient(token string, timeout time.Duration) *cloudscale.Client return cloudscale.NewClient(httpClient) } -// Initialize provides the cloud with a kubernetes client builder and may spawn goroutines -// to perform housekeeping or run custom controllers specific to the cloud provider. -// Any tasks started here should be cleaned up when the stop channel closes. -func (c cloud) Initialize(clientBuilder cloudprovider.ControllerClientBuilder, stop <-chan struct{}) { +// Initialize provides the cloud with a kubernetes client builder and may spawn +// goroutines to perform housekeeping or run custom controllers specific to the +// cloud provider. Any tasks started here should be cleaned up when the stop +// channel closes. +func (c *cloud) Initialize( + clientBuilder cloudprovider.ControllerClientBuilder, + stop <-chan struct{}) { + + // This cannot be configured earlier, even though it seems better situated + // in newCloudscaleClient + c.loadbalancer.k8s = kubernetes.NewForConfigOrDie( + clientBuilder.ConfigOrDie("cloudscale-cloud-controller-manager")) } -// LoadBalancer returns a balancer interface. Also returns true if the interface is supported, false otherwise. -func (c cloud) LoadBalancer() (cloudprovider.LoadBalancer, bool) { - return nil, false +// LoadBalancer returns a balancer interface. Also returns true if the +// interface is supported, false otherwise. +func (c *cloud) LoadBalancer() (cloudprovider.LoadBalancer, bool) { + return c.loadbalancer, true } -// Instances returns an instances interface. Also returns true if the interface is supported, false otherwise. -func (c cloud) Instances() (cloudprovider.Instances, bool) { +// Instances returns an instances interface. Also returns true if the +// interface is supported, false otherwise. 
+func (c *cloud) Instances() (cloudprovider.Instances, bool) { return nil, false } -// InstancesV2 is an implementation for instances and should only be implemented by external cloud providers. -// Implementing InstancesV2 is behaviorally identical to Instances but is optimized to significantly reduce -// API calls to the cloud provider when registering and syncing nodes. Implementation of this interface will -// disable calls to the Zones interface. Also returns true if the interface is supported, false otherwise. -func (c cloud) InstancesV2() (cloudprovider.InstancesV2, bool) { +// InstancesV2 is an implementation for instances and should only be +// implemented by external cloud providers. Implementing InstancesV2 is +// behaviorally identical to Instances but is optimized to significantly +// reduce API calls to the cloud provider when registering and syncing nodes. +// Implementation of this interface will disable calls to the Zones interface. +// Also returns true if the interface is supported, false otherwise. +func (c *cloud) InstancesV2() (cloudprovider.InstancesV2, bool) { return c.instances, true } -// Zones returns a zones interface. Also returns true if the interface is supported, false otherwise. -// DEPRECATED: Zones is deprecated in favor of retrieving zone/region information from InstancesV2. -// This interface will not be called if InstancesV2 is enabled. -func (c cloud) Zones() (cloudprovider.Zones, bool) { +// Zones returns a zones interface. Also returns true if the interface is +// supported, false otherwise. DEPRECATED: Zones is deprecated in favor of +// retrieving zone/region information from InstancesV2. This interface will not +// be called if InstancesV2 is enabled. +func (c *cloud) Zones() (cloudprovider.Zones, bool) { return nil, false } -// Clusters returns a clusters interface. Also returns true if the interface is supported, false otherwise. 
-func (c cloud) Clusters() (cloudprovider.Clusters, bool) { +// Clusters returns a clusters interface. Also returns true if the interface +// is supported, false otherwise. +func (c *cloud) Clusters() (cloudprovider.Clusters, bool) { return nil, false } -// Routes returns a routes interface along with whether the interface is supported. -func (c cloud) Routes() (cloudprovider.Routes, bool) { +// Routes returns a routes interface along with whether the interface +// is supported. +func (c *cloud) Routes() (cloudprovider.Routes, bool) { return nil, false } // ProviderName returns the cloud provider ID. -func (c cloud) ProviderName() string { +func (c *cloud) ProviderName() string { return ProviderName } // HasClusterID returns true if a ClusterID is required and set -func (c cloud) HasClusterID() bool { +func (c *cloud) HasClusterID() bool { return false } diff --git a/pkg/cloudscale_ccm/cloud_test.go b/pkg/cloudscale_ccm/cloud_test.go index 00164fd..cea2573 100644 --- a/pkg/cloudscale_ccm/cloud_test.go +++ b/pkg/cloudscale_ccm/cloud_test.go @@ -43,25 +43,13 @@ func TestNewCloudscaleProviderWithBadConfig(t *testing.T) { } } -func TestNewCloudscaleProviderWithDefaultTimeout(t *testing.T) { - provider, _ := newCloudscaleProvider(nil) - if cs, _ := provider.(*cloud); cs.timeout != (5 * time.Second) { - t.Errorf("unexpected default timeout: %s", cs.timeout) - } +func TestDefaultTimeout(t *testing.T) { + timeout := apiTimeout() + assert.Equal(t, timeout, 20*time.Second) } -func TestNewCloudscaleProviderWithInvalidTimeout(t *testing.T) { - os.Setenv(ApiTimeout, "asdf") - provider, _ := newCloudscaleProvider(nil) - if cs, _ := provider.(*cloud); cs.timeout != (5 * time.Second) { - t.Errorf("unexpected fallback timeout: %s", cs.timeout) - } -} - -func TestNewCloudscaleProviderWithCustomTimeout(t *testing.T) { - os.Setenv(ApiTimeout, "10") - provider, _ := newCloudscaleProvider(nil) - if cs, _ := provider.(*cloud); cs.timeout != (10 * time.Second) { - t.Errorf("ignored %s: 
%s", ApiTimeout, cs.timeout) - } +func TestCustomTimeout(t *testing.T) { + os.Setenv(ApiTimeout, "5") + timeout := apiTimeout() + assert.Equal(t, timeout, 5*time.Second) } diff --git a/pkg/cloudscale_ccm/instances.go b/pkg/cloudscale_ccm/instances.go index 5b89d20..b42626f 100644 --- a/pkg/cloudscale_ccm/instances.go +++ b/pkg/cloudscale_ccm/instances.go @@ -21,10 +21,10 @@ type instances struct { func (i *instances) InstanceExists(ctx context.Context, node *v1.Node) ( bool, error) { - server, err := i.srv.findByNode(ctx, node).atMostOne() + server, err := i.srv.findByNode(ctx, node).AtMostOne() if err != nil { - return false, err + return false, fmt.Errorf("unable to find node %s: %w", node.Name, err) } if server == nil { @@ -50,10 +50,10 @@ func (i *instances) InstanceExists(ctx context.Context, node *v1.Node) ( func (i *instances) InstanceShutdown(ctx context.Context, node *v1.Node) ( bool, error) { - server, err := i.srv.findByNode(ctx, node).one() + server, err := i.srv.findByNode(ctx, node).One() if err != nil { - return false, err + return false, fmt.Errorf("unable to find node %s: %w", node.Name, err) } klog.InfoS( @@ -74,10 +74,10 @@ func (i *instances) InstanceShutdown(ctx context.Context, node *v1.Node) ( func (i *instances) InstanceMetadata(ctx context.Context, node *v1.Node) ( *cloudprovider.InstanceMetadata, error) { - server, err := i.srv.findByNode(ctx, node).one() + server, err := i.srv.findByNode(ctx, node).One() if err != nil { - return nil, err + return nil, fmt.Errorf("unable to find node %s: %w", node.Name, err) } id, err := uuid.Parse(server.UUID) diff --git a/pkg/cloudscale_ccm/instances_test.go b/pkg/cloudscale_ccm/instances_test.go index 4c26304..59851cc 100644 --- a/pkg/cloudscale_ccm/instances_test.go +++ b/pkg/cloudscale_ccm/instances_test.go @@ -5,7 +5,7 @@ import ( "testing" "github.com/cloudscale-ch/cloudscale-cloud-controller-manager/pkg/internal/testkit" - "github.com/cloudscale-ch/cloudscale-go-sdk" + cloudscale 
"github.com/cloudscale-ch/cloudscale-go-sdk/v4" "github.com/stretchr/testify/assert" v1 "k8s.io/api/core/v1" ) diff --git a/pkg/cloudscale_ccm/lb_mapper.go b/pkg/cloudscale_ccm/lb_mapper.go new file mode 100644 index 0000000..30d559f --- /dev/null +++ b/pkg/cloudscale_ccm/lb_mapper.go @@ -0,0 +1,76 @@ +package cloudscale_ccm + +import ( + "context" + "errors" + + "github.com/cloudscale-ch/cloudscale-cloud-controller-manager/pkg/internal/limiter" + cloudscale "github.com/cloudscale-ch/cloudscale-go-sdk/v4" +) + +// lbMapper maps cloudscale loadbalancers to Kubernetes services. +type lbMapper struct { + client *cloudscale.Client +} + +// findByServiceInfo returns loadbalancers matching the given service info +// (there may be multiple matches). +func (l *lbMapper) findByServiceInfo( + ctx context.Context, + serviceInfo *serviceInfo, +) *limiter.Limiter[cloudscale.LoadBalancer] { + + if uuid := serviceInfo.annotation(LoadBalancerUUID); uuid != "" { + return l.getByUUID(ctx, uuid) + } + + return l.findByName(ctx, serviceInfo.annotation(LoadBalancerName)) +} + +func (l *lbMapper) getByUUID( + ctx context.Context, + uuid string, +) *limiter.Limiter[cloudscale.LoadBalancer] { + + server, err := l.client.LoadBalancers.Get(ctx, uuid) + if err != nil { + var response *cloudscale.ErrorResponse + + if errors.As(err, &response) && response.StatusCode == 404 { + return limiter.New[cloudscale.LoadBalancer](nil) + } + + return limiter.New[cloudscale.LoadBalancer](err) + } + + return limiter.New[cloudscale.LoadBalancer](nil, *server) +} + +// findByName returns loadbalancers matching the given name (there may be +// multiple matches). 
+func (l *lbMapper) findByName( + ctx context.Context, + name string, +) *limiter.Limiter[cloudscale.LoadBalancer] { + + if name == "" { + return limiter.New[cloudscale.LoadBalancer]( + errors.New("no load balancer with empty name found")) + } + + lbs, err := l.client.LoadBalancers.List(ctx) + if err != nil { + return limiter.New[cloudscale.LoadBalancer](err) + } + + matches := []cloudscale.LoadBalancer{} + for _, lb := range lbs { + l := lb + + if l.Name == name { + matches = append(matches, l) + } + } + + return limiter.New[cloudscale.LoadBalancer](nil, matches...) +} diff --git a/pkg/cloudscale_ccm/lb_mapper_test.go b/pkg/cloudscale_ccm/lb_mapper_test.go new file mode 100644 index 0000000..5a736e0 --- /dev/null +++ b/pkg/cloudscale_ccm/lb_mapper_test.go @@ -0,0 +1,54 @@ +package cloudscale_ccm + +import ( + "context" + "testing" + + "github.com/cloudscale-ch/cloudscale-cloud-controller-manager/pkg/internal/testkit" + "github.com/cloudscale-ch/cloudscale-go-sdk/v4" + "github.com/stretchr/testify/assert" +) + +func TestFindLoadBalancer(t *testing.T) { + server := testkit.NewMockAPIServer() + server.WithLoadBalancers([]cloudscale.LoadBalancer{ + {UUID: "c2e4aabd-8c91-46da-b069-71e01f439806", Name: "foo"}, + {UUID: "096c58ff-41c5-44fa-9ba3-05defce2062a", Name: "clone"}, + {UUID: "85dffa20-8097-4d75-afa6-9e4372047ce6", Name: "clone"}, + }) + server.Start() + defer server.Close() + + mapper := lbMapper{client: server.Client()} + + s := testkit.NewService("service").V1() + i := newServiceInfo(s, "") + + // Neither name nor uuid given + lbs := mapper.findByServiceInfo(context.Background(), i) + assert.NoError(t, lbs.None()) + + // Using a unique name + s.Annotations = make(map[string]string) + s.Annotations[LoadBalancerName] = "foo" + + lbs = mapper.findByServiceInfo(context.Background(), i) + lb, err := lbs.One() + assert.NoError(t, err) + assert.Equal(t, "foo", lb.Name) + + // Using an ambiguous name + s.Annotations[LoadBalancerName] = "clone" + + lbs = 
mapper.findByServiceInfo(context.Background(), i) + _, err = lbs.One() + assert.Error(t, err) + + // Using a uuid + s.Annotations[LoadBalancerUUID] = "85dffa20-8097-4d75-afa6-9e4372047ce6" + + lbs = mapper.findByServiceInfo(context.Background(), i) + lb, err = lbs.One() + assert.NoError(t, err) + assert.Equal(t, "clone", lb.Name) +} diff --git a/pkg/cloudscale_ccm/limiter_test.go b/pkg/cloudscale_ccm/limiter_test.go deleted file mode 100644 index d24c898..0000000 --- a/pkg/cloudscale_ccm/limiter_test.go +++ /dev/null @@ -1,66 +0,0 @@ -package cloudscale_ccm - -import ( - "errors" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestError(t *testing.T) { - lim := newLimiter[string](errors.New("fail"), "foo") - - v, err := lim.one() - assert.Error(t, err) - assert.Nil(t, v) -} - -func TestFoundOne(t *testing.T) { - lim := newLimiter[string](nil, "foo") - - v, err := lim.one() - assert.NoError(t, err) - assert.Equal(t, "foo", *v) -} - -func TestNotFoundOne(t *testing.T) { - lim := newLimiter[string](nil) - - v, err := lim.one() - assert.Error(t, err) - assert.Nil(t, v) -} - -func TestAtMostOneEmpty(t *testing.T) { - lim := newLimiter[string](nil) - - v, err := lim.atMostOne() - assert.NoError(t, err) - assert.Nil(t, v) -} - -func TestAtMostOne(t *testing.T) { - lim := newLimiter[string](nil, "foo") - - v, err := lim.atMostOne() - assert.NoError(t, err) - assert.Equal(t, "foo", *v) -} - -func TestAtMostOneTooMany(t *testing.T) { - lim := newLimiter[string](nil, "foo", "bar") - - v, err := lim.atMostOne() - assert.Error(t, err) - assert.Nil(t, v) -} - -func TestNone(t *testing.T) { - lim := newLimiter[string](nil) - assert.Nil(t, lim.none()) -} - -func TestNoneNotEmpty(t *testing.T) { - lim := newLimiter[string](nil, "foo") - assert.Error(t, lim.none()) -} diff --git a/pkg/cloudscale_ccm/loadbalancer.go b/pkg/cloudscale_ccm/loadbalancer.go new file mode 100644 index 0000000..a26232a --- /dev/null +++ b/pkg/cloudscale_ccm/loadbalancer.go @@ -0,0 +1,372 @@ 
+package cloudscale_ccm + +import ( + "context" + "fmt" + + "github.com/cloudscale-ch/cloudscale-cloud-controller-manager/pkg/internal/kubeutil" + "github.com/cloudscale-ch/cloudscale-go-sdk/v4" + v1 "k8s.io/api/core/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/klog/v2" +) + +// Annotations used by the loadbalancer integration of cloudscale_ccm. Those +// are pretty much set in stone, once they are in a release, so do not change +// them, unless you know what you are doing. +const ( + // LoadBalancerUUID uniquely identifes the loadbalancer. This annotation + // should not be provided by the customer, unless the adoption of an + // existing load balancer is desired. + // + // In all other cases, this value is set by the CCM after creating the + // load balancer, to ensure that we track it with a proper ID and not + // a name that might change without our knowledge. + LoadBalancerUUID = "k8s.cloudscale.ch/loadbalancer-uuid" + + // LoadBalancerConfigVersion is set by the CCM when it first handles a + // service. It exists to allow future CCM changes and should not be + // tampered with. Once set, it is not changed, unless there is an upgrade + // path applied by the CCM. + LoadBalancerConfigVersion = "k8s.cloudscale.ch/loadbalancer-config-version" + + // LoadBalancerName names the loadbalancer on creation, and renames it + // later. Note that if the LoadBalancerUUID annotation exists, it takes + // precedence over the name to match the load balancer. + // + // This annotation can be changed without downtime on an esablished + // service, but it is not recommended. + LoadBalancerName = "k8s.cloudscale.ch/loadbalancer-name" + + // LoadBalancerFlavor denotes the flavor used by the balancer. There is + // currently only one flavor, lb-standard. + // + // This can currently not be changed and will cause an error if attempted. + LoadBalancerFlavor = "k8s.cloudscale.ch/loadbalancer-flavor" + + // LoadBalancerZone defines the zone in which the load balancer is running. 
+ // This defaults to the zone of the Nodes (if there is only one). + // + // This can not be changed once the service is created. + LoadBalancerZone = "k8s.cloudscale.ch/loadbalancer-zone" + + // LoadBalancerPoolAlgorithm defines the load balancing algorithm used + // by the loadbalancer. See the API documentation for more information: + // + // https://www.cloudscale.ch/en/api/v1#pool-algorithms + // + // Defaults to `round_robin`. + // + // Changing this algorithm will on an established service causes downtime, + // as all pools have to be recreated. + LoadBalancerPoolAlgorithm = "k8s.cloudscale.ch/loadbalancer-pool-algorithm" + + // LoadBalancerPoolProtocol defines the protocol for all the pools of the + // service. We are technically able to have different protocols for + // different ports in a service, but as our options apart from `tcp` are + // currently `proxy` and `proxyv2`, we go with Kubernetes's recommendation + // to apply these protocols to all incoming connections the same way: + // + // https://kubernetes.io/docs/reference/networking/service-protocols/#protocol-proxy-special + // + // Supported protocols: + // + // https://www.cloudscale.ch/en/api/v1#pool-protocols + // + // An alternative approach might be to use the spec.ports.appService on the + // service, with custom strings, should anyone require such a feature. + // + // Changing the pool protocol on an established service causes downtime, + // as all pools have to be recreated. + LoadBalancerPoolProtocol = "k8s.cloudscale.ch/loadbalancer-pool-protocol" + + // LoadBalancerHealthMonitorDelayS is the delay between two successive + // checks, in seconds. Defaults to 2. + // + // Changing this annotation on an active service may lead to new + // connections timing out while the monitor is updated. 
+ LoadBalancerHealthMonitorDelayS = "k8s.cloudscale.ch/loadbalancer-health-monitor-delay-s" + + // LoadBalancerHealthMonitorTimeoutS is the maximum time allowed for an + // individual check, in seconds. Defaults to 1. + // + // Changing this annotation on an active service may lead to new + // connections timing out while the monitor is updated. + LoadBalancerHealthMonitorTimeoutS = "k8s.cloudscale.ch/loadbalancer-health-monitor-timeout-s" + + // LoadBalancerHealthMonitorDownThreshold is the number of the checks that + // need to succeed before a pool member is considered up. Defaults to 2. + LoadBalancerHealthMonitorUpThreshold = "k8s.cloudscale.ch/loadbalancer-health-monitor-up-threshold" + + // LoadBalancerHealthMonitorDownThreshold is the number of the checks that + // need to fail before a pool member is considered down. Defaults to 3. + // + // Changing this annotation on an active service may lead to new + // connections timing out while the monitor is updated. + LoadBalancerHealthMonitorDownThreshold = "k8s.cloudscale.ch/loadbalancer-health-monitor-down-threshold" + + // LoadBalancerHealthMonitorType defines the approach the monitor takes. + // (ping, tcp, http, https, tls-hello). + // + // See https://www.cloudscale.ch/en/api/v1#health-monitor-types + // + // Changing this annotation on an active service may lead to new + // connections timing out while the monitor is recreated. + LoadBalancerHealthMonitorType = "k8s.cloudscale.ch/loadbalancer-health-monitor-type" + + // LoadBalancerHealthMonitorHTTP configures details about the HTTP check. + // + // See https://www.cloudscale.ch/en/api/v1#http-attribute-specification + // + // Changing this annotation on an active service may lead to new + // connections timing out while the monitor is updated. + LoadBalancerHealthMonitorHTTP = "k8s.cloudscale.ch/loadbalancer-health-monitor-http" + + // LoadBalancerListenerProtocol defines the protocol used by the listening + // port on the loadbalancer. 
Currently, only tcp is supported. + // + // See https://www.cloudscale.ch/en/api/v1#listener-protocols + // + // Changing this annotation on an established service may cause downtime + // as the listeners are recreated. + LoadBalancerListenerProtocol = "k8s.cloudscale.ch/loadbalancer-listener-protocol" + + // LoadBalancerListenerAllowedCIDRs is a JSON list of IP addresses that + // should be allowed to access the load balancer. For example: + // + // * `[]` means that anyone is allowed to connect (default). + // * `["1.1.1.1", "8.8.8.8"]` only the given addresses are allowed. + // + // Changing this annotation on an established service is considered safe. + LoadBalancerListenerAllowedCIDRs = "k8s.cloudscale.ch/loadbalancer-listener-allowed-cidrs" + + // LoadBalancerListenerTimeoutClientDataMS denotes the milliseconds until + // inactive client connections are dropped. + // + // Changing this annotation on an established service is considered safe. + LoadBalancerListenerTimeoutClientDataMS = "k8s.cloudscale.ch/loadbalancer-timeout-client-data-ms" + + // LoadBalancerListenerTimeoutMemberConnectMS denotes the milliseconds + // it should maximally take to connect to a pool member, before the + // attempt is aborted. + // + // Changing this annotation on an established service is considered safe. + LoadBalancerListenerTimeoutMemberConnectMS = "k8s.cloudscale.ch/loadbalancer-timeout-member-connect-ms" + + // LoadBalancerListenerTimeoutMemberDataMS denotes the milliseconds until + // an inactive connection to a pool member is dropped. + // + // Changing this annotation on an established service is considered safe. + LoadBalancerListenerTimeoutMemberDataMS = "k8s.cloudscale.ch/loadbalancer-timeout-member-data-ms" +) + +type loadbalancer struct { + lbs lbMapper + srv serverMapper + k8s kubernetes.Interface +} + +// GetLoadBalancer returns whether the specified load balancer exists, and +// if so, what its status is. 
+// +// Implementations must treat the *v1.Service parameter as read-only and not +// modify it. +// +// Parameter 'clusterName' is the name of the cluster as presented to +// kube-controller-manager. +func (l *loadbalancer) GetLoadBalancer( + ctx context.Context, + clusterName string, + service *v1.Service, +) (status *v1.LoadBalancerStatus, exists bool, err error) { + + serviceInfo := newServiceInfo(service, clusterName) + if supported, _ := serviceInfo.isSupported(); !supported { + return nil, false, nil + } + + instance, err := l.lbs.findByServiceInfo(ctx, serviceInfo).AtMostOne() + + if err != nil { + return nil, false, fmt.Errorf( + "unable to get load balancer for %s: %w", service.Name, err) + } + + if instance == nil { + klog.InfoS( + "loadbalancer does not exist", + "Name", serviceInfo.annotation(LoadBalancerName), + "Service", service.Name, + ) + + return nil, false, nil + } + + return loadBalancerStatus(instance), true, nil +} + +// GetLoadBalancerName returns the name of the load balancer. Implementations +// must treat the *v1.Service parameter as read-only and not modify it. +func (lb *loadbalancer) GetLoadBalancerName( + ctx context.Context, + clusterName string, + service *v1.Service, +) string { + name := newServiceInfo(service, clusterName).annotation(LoadBalancerName) + + klog.InfoS( + "loaded loadbalancer name for service", + "Name", name, + "Service", service.Name, + ) + + return name +} + +// EnsureLoadBalancer creates a new load balancer 'name', or updates the +// existing one. Returns the status of the balancer. Implementations must treat +// the *v1.Service and *v1.Node parameters as read-only and not modify them. +// +// Parameter 'clusterName' is the name of the cluster as presented to +// kube-controller-manager. +// +// Implementations may return a (possibly wrapped) api.RetryError to enforce +// backing off at a fixed duration. 
This can be used for cases like when the +// load balancer is not ready yet (e.g., it is still being provisioned) and +// polling at a fixed rate is preferred over backing off exponentially in +// order to minimize latency. +func (l *loadbalancer) EnsureLoadBalancer( + ctx context.Context, + clusterName string, + service *v1.Service, + nodes []*v1.Node, +) (*v1.LoadBalancerStatus, error) { + + // Skip if the service is not supported by this CCM + serviceInfo := newServiceInfo(service, clusterName) + if supported, err := serviceInfo.isSupported(); !supported { + return nil, err + } + + // Reconcile + err := reconcileLbState(ctx, l.lbs.client, func() (*lbState, error) { + // Get the desired state from Kubernetes + servers, err := l.srv.mapNodes(ctx, nodes).All() + if err != nil { + return nil, fmt.Errorf( + "unable to get load balancer for %s: %w", service.Name, err) + } + + return desiredLbState(serviceInfo, nodes, servers) + }, func() (*lbState, error) { + // Get the current state from cloudscale.ch + return actualLbState(ctx, &l.lbs, serviceInfo) + }) + + if err != nil { + return nil, err + } + + // Get the final state to show the status + actual, err := actualLbState(ctx, &l.lbs, serviceInfo) + if err != nil { + return nil, err + } + + // At creation annotate the service with necessary data + version := serviceInfo.annotation(LoadBalancerConfigVersion) + + err = kubeutil.AnnotateService(ctx, l.k8s, serviceInfo.Service, + LoadBalancerUUID, actual.lb.UUID, + LoadBalancerConfigVersion, version, + LoadBalancerZone, actual.lb.Zone.Slug, + ) + if err != nil { + return nil, fmt.Errorf( + "unable to annotate service %s: %w", service.Name, err) + } + + return loadBalancerStatus(actual.lb), nil +} + +// UpdateLoadBalancer updates hosts under the specified load balancer. +// Implementations must treat the *v1.Service and *v1.Node +// parameters as read-only and not modify them. 
+// +// Parameter 'clusterName' is the name of the cluster as presented to +// kube-controller-manager. +func (l *loadbalancer) UpdateLoadBalancer( + ctx context.Context, + clusterName string, + service *v1.Service, + nodes []*v1.Node, +) error { + + // Skip if the service is not supported by this CCM + serviceInfo := newServiceInfo(service, clusterName) + if supported, err := serviceInfo.isSupported(); !supported { + return err + } + + // Reconcile + return reconcileLbState(ctx, l.lbs.client, func() (*lbState, error) { + // Get the desired state from Kubernetes + servers, err := l.srv.mapNodes(ctx, nodes).All() + if err != nil { + return nil, fmt.Errorf( + "unable to get load balancer for %s: %w", service.Name, err) + } + + return desiredLbState(serviceInfo, nodes, servers) + }, func() (*lbState, error) { + // Get the current state from cloudscale.ch + return actualLbState(ctx, &l.lbs, serviceInfo) + }) +} + +// EnsureLoadBalancerDeleted deletes the specified load balancer if it +// exists, returning nil if the load balancer specified either didn't exist or +// was successfully deleted. +// +// This construction is useful because many cloud providers' load balancers +// have multiple underlying components, meaning a Get could say that the lb +// doesn't exist even if some part of it is still laying around. +// +// Implementations must treat the *v1.Service parameter as read-only and not +// modify it. +// +// Parameter 'clusterName' is the name of the cluster as presented to +// kube-controller-manager. 
+func (l *loadbalancer) EnsureLoadBalancerDeleted( + ctx context.Context, + clusterName string, + service *v1.Service, +) error { + + // Skip if the service is not supported by this CCM + serviceInfo := newServiceInfo(service, clusterName) + if supported, err := serviceInfo.isSupported(); !supported { + return err + } + + // Reconcile with a desired state of "nothing" + return reconcileLbState(ctx, l.lbs.client, func() (*lbState, error) { + return &lbState{}, nil + }, func() (*lbState, error) { + return actualLbState(ctx, &l.lbs, serviceInfo) + }) +} + +// loadBalancerStatus generates the v1.LoadBalancerStatus for the given +// loadbalancer, as required by Kubernetes. +func loadBalancerStatus(lb *cloudscale.LoadBalancer) *v1.LoadBalancerStatus { + + status := v1.LoadBalancerStatus{} + status.Ingress = make([]v1.LoadBalancerIngress, len(lb.VIPAddresses)) + + for i, address := range lb.VIPAddresses { + status.Ingress[i].IP = address.Address + } + + return &status +} diff --git a/pkg/cloudscale_ccm/reconcile.go b/pkg/cloudscale_ccm/reconcile.go new file mode 100644 index 0000000..ac9560e --- /dev/null +++ b/pkg/cloudscale_ccm/reconcile.go @@ -0,0 +1,896 @@ +package cloudscale_ccm + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "math/rand" + "slices" + "strings" + "time" + + "github.com/cloudscale-ch/cloudscale-cloud-controller-manager/pkg/internal/actions" + "github.com/cloudscale-ch/cloudscale-cloud-controller-manager/pkg/internal/compare" + "github.com/cloudscale-ch/cloudscale-go-sdk/v4" + v1 "k8s.io/api/core/v1" + "k8s.io/klog/v2" +) + +type lbState struct { + lb *cloudscale.LoadBalancer + + // Pool pointers are used to refer to members by pool, therefore use a + // pointer here as well, to not accidentally copy the struct. + pools []*cloudscale.LoadBalancerPool + members map[*cloudscale.LoadBalancerPool][]cloudscale. + LoadBalancerPoolMember + monitors map[*cloudscale.LoadBalancerPool][]cloudscale. 
+ LoadBalancerHealthMonitor + + // Though not currently used that way, listeners are not + // necessarily bound to any given pool. + listeners map[*cloudscale.LoadBalancerPool][]cloudscale. + LoadBalancerListener +} + +func newLbState(lb *cloudscale.LoadBalancer) *lbState { + return &lbState{ + lb: lb, + pools: make([]*cloudscale.LoadBalancerPool, 0), + members: make( + map[*cloudscale.LoadBalancerPool][]cloudscale.LoadBalancerPoolMember), + monitors: make( + map[*cloudscale.LoadBalancerPool][]cloudscale.LoadBalancerHealthMonitor), + listeners: make( + map[*cloudscale.LoadBalancerPool][]cloudscale.LoadBalancerListener), + } +} + +// desiredLbState computes the state we want to see with the given service +// and nodes. Note that nodes/servers should be a 1:1 mapping, so that +// the first node points to the first server, and so on. +func desiredLbState( + serviceInfo *serviceInfo, + nodes []*v1.Node, + servers []cloudscale.Server, +) (*lbState, error) { + + // This would indicate a programming error somewhere + if len(nodes) != len(servers) { + return nil, fmt.Errorf("bad node to server mapping") + } + + // Get the zone of the load balancer, either from annotation, or by + // looking at the nodes. 
+ zone := serviceInfo.annotation(LoadBalancerZone) + if zone == "" { + for _, s := range servers { + if zone != "" && zone != s.Zone.Slug { + return nil, fmt.Errorf( + "no loadbalancer zone set and nodes in multiple zones", + ) + } + zone = s.Zone.Slug + } + } + + s := newLbState(&cloudscale.LoadBalancer{ + Name: serviceInfo.annotation(LoadBalancerName), + VIPAddresses: []cloudscale.VIPAddress{}, + Flavor: cloudscale.LoadBalancerFlavorStub{ + Slug: serviceInfo.annotation(LoadBalancerFlavor), + }, + ZonalResource: cloudscale.ZonalResource{ + Zone: cloudscale.Zone{Slug: zone}, + }, + }) + + // Each service port gets its own pool + algorithm := serviceInfo.annotation(LoadBalancerPoolAlgorithm) + protocol := serviceInfo.annotation(LoadBalancerPoolProtocol) + + for _, port := range serviceInfo.Service.Spec.Ports { + + if port.Protocol != "TCP" { + return nil, fmt.Errorf( + "service %s: cannot use %s for %d, only TCP is supported", + serviceInfo.Service.Name, + port.Protocol, + port.Port) + } + + nodePort := int(port.NodePort) + if nodePort == 0 { + return nil, fmt.Errorf( + "service %s: unknown port: %d", + serviceInfo.Service.Name, + port.NodePort) + } + + monitorPort := nodePort + if serviceInfo.Service.Spec.ExternalTrafficPolicy == "Local" { + if serviceInfo.Service.Spec.HealthCheckNodePort > 0 { + monitorPort = int(serviceInfo.Service.Spec.HealthCheckNodePort) + } + } + + pool := cloudscale.LoadBalancerPool{ + Name: poolName(port.Protocol, port.Port), + Algorithm: algorithm, + Protocol: protocol, + } + s.pools = append(s.pools, &pool) + + // For each server and private address, we need to add a pool member + for _, server := range servers { + for _, iface := range server.Interfaces { + + // There's currently no support to load balance "to public" + if iface.Type == "public" { + continue + } + + // Create a pool member for each address + for _, addr := range iface.Addresses { + + name := poolMemberName(addr.Address, nodePort) + s.members[&pool] = 
append(s.members[&pool], + cloudscale.LoadBalancerPoolMember{ + Name: name, + Enabled: true, + Address: addr.Address, + Subnet: addr.Subnet, + ProtocolPort: nodePort, + MonitorPort: monitorPort, + }, + ) + } + } + } + + // If there are no pool members, return an error. It would be possible + // to just put a load balancer up that has no function, but it seems + // more useful to err instead, as there's likely something wrong. + if len(s.members[&pool]) == 0 { + return nil, fmt.Errorf( + "service %s: no private address found on any node", + serviceInfo.Service.Name) + } + + // Add a health monitor for each pool + monitor, err := healthMonitorForPort(serviceInfo) + if err != nil { + return nil, err + } + + s.monitors[&pool] = append(s.monitors[&pool], *monitor) + + // Add a listener for each pool + listener, err := listenerForPort(serviceInfo, int(port.Port)) + if err != nil { + return nil, err + } + + s.listeners[&pool] = append(s.listeners[&pool], *listener) + } + + return s, nil +} + +func actualLbState( + ctx context.Context, + l *lbMapper, + serviceInfo *serviceInfo, +) (*lbState, error) { + + // Get the loadbalancer + lb, err := l.findByServiceInfo(ctx, serviceInfo).AtMostOne() + if err != nil { + return nil, fmt.Errorf( + "unable to get load balancer for %s: %w", + serviceInfo.Service.Name, err) + } + if lb == nil { + return &lbState{}, nil + } + + s := newLbState(lb) + + // Keep track of pool UUIDs (this can be removed once the load balancer + // info is included in listener/monitor list calls). 
+ poolUUIDs := make(map[string]bool) + + // Load all monitors/listeners first (may be from other load balancers) + monitors, err := l.client.LoadBalancerHealthMonitors.List(ctx) + if err != nil { + return nil, fmt.Errorf( + "lb state: failed to load monitors: %w", err) + } + + listeners, err := l.client.LoadBalancerListeners.List(ctx) + if err != nil { + return nil, fmt.Errorf( + "lb state: failed to load listeners: %w", err) + } + + // Gather pools and members + pools, err := l.client.LoadBalancerPools.List(ctx) + if err != nil { + return nil, fmt.Errorf( + "lb state: failed to load pools: %w", err) + } + + for _, pool := range pools { + p := pool + + if p.LoadBalancer.UUID != lb.UUID { + continue + } + + s.pools = append(s.pools, &p) + poolUUIDs[p.UUID] = true + + s.members[&p], err = l.client.LoadBalancerPoolMembers.List(ctx, p.UUID) + if err != nil { + return nil, fmt.Errorf( + "lbstate: failed to load members for %s: %w", p.UUID, err) + } + + for _, m := range monitors { + if m.Pool.UUID != p.UUID { + continue + } + + s.monitors[&p] = append(s.monitors[&p], m) + } + + for _, l := range listeners { + if l.Pool == nil || l.Pool.UUID != p.UUID { + continue + } + + s.listeners[&p] = append(s.listeners[&p], l) + } + } + + // Add free floating listeners (maybe used in the future) + for _, l := range listeners { + if l.Pool != nil || l.LoadBalancer.UUID != lb.UUID { + continue + } + + s.listeners[nil] = append(s.listeners[nil], l) + } + + return s, nil +} + +// nextLbActions returns a list of actions to take to ensure a desired +// loadbalancer state is reached. 
+func nextLbActions( + desired *lbState, actual *lbState) ([]actions.Action, error) { + + next := make([]actions.Action, 0) + + // Some state has to be given, even if empty + if desired == nil { + return next, errors.New("no desired state given") + } + + if actual == nil { + return next, errors.New("no actual state given") + } + + delete := func(url string) { + next = append(next, + actions.DeleteResource(url), + actions.Sleep(500*time.Millisecond)) + } + + // Keys define the values that cause an item to be recreated. If the key + // of an actual item is not found in the desired list, it is dropped. If + // the key of a desired item does not exist, it is created. + poolKey := func(p *cloudscale.LoadBalancerPool) string { + return fmt.Sprint( + p.Name, + p.Algorithm, + p.Protocol, + ) + } + + poolMemberKey := func(m cloudscale.LoadBalancerPoolMember) string { + return fmt.Sprint( + m.Name, + m.Enabled, + m.MonitorPort, + m.ProtocolPort, + m.Address, + m.Subnet, + ) + } + + listenerKey := func(l cloudscale.LoadBalancerListener) string { + return fmt.Sprint( + l.Name, + l.Protocol, + l.ProtocolPort, + ) + } + + monitorKey := func(m cloudscale.LoadBalancerHealthMonitor) string { + httpVersion := "1.1" + + if m.HTTP != nil && m.HTTP.Version != "" { + httpVersion = m.HTTP.Version + } + + return fmt.Sprint( + m.Type, + httpVersion, + ) + } + + // If no lb is desired, and there is none, stop + if desired.lb == nil && actual.lb == nil { + return next, nil + } + + // If an lb is desired, and there is none, create one. This always causes + // a re-evaluation and we'll be called again with an existing lb.
+ if desired.lb != nil && actual.lb == nil { + next = append(next, + actions.CreateLb(desired.lb), + actions.Refetch(), + ) + + return next, nil + } + + // No matter what happens next, we need an lb that is ready + next = append(next, actions.AwaitLb(actual.lb)) + + // If the lb should be deleted, do so (causes a cascade) + if desired.lb == nil && actual.lb != nil { + next = append(next, actions.DeleteResource(actual.lb.HREF)) + return next, nil + } + + // If the lb requires other changes, inform the user that they need to + // recreate the service themselves. + if len(desired.lb.VIPAddresses) > 0 { + if !slices.Equal(desired.lb.VIPAddresses, actual.lb.VIPAddresses) { + return nil, fmt.Errorf( + "VIP addresses for %s changed, please re-create the service", + actual.lb.HREF, + ) + } + } + + if desired.lb.Flavor.Slug != actual.lb.Flavor.Slug { + return nil, fmt.Errorf( + "flavor for %s changed, please configure the previous flavor "+ + "or contact support", + actual.lb.HREF, + ) + } + + if desired.lb.Zone.Slug != actual.lb.Zone.Slug { + return nil, fmt.Errorf( + "zone for %s changed, please configure the previous zone "+ + "or contact support", + actual.lb.HREF, + ) + } + + // If the name of the lb is wrong, change it to the desired name + if desired.lb.Name != actual.lb.Name { + next = append(next, actions.RenameLb(actual.lb.UUID, desired.lb.Name)) + } + + // All other changes are applied aggressively, as the customer would have + // to do that manually anyway by recreating the service, which would be + // more disruptive.
+ poolsToDelete, poolsToCreate := compare.Diff[*cloudscale.LoadBalancerPool]( + desired.pools, + actual.pools, + poolKey, + ) + + // Remove undesired pools + for _, p := range poolsToDelete { + for _, m := range actual.members[p] { + delete(m.HREF) + } + delete(p.HREF) + } + + // Create missing pools + for _, p := range poolsToCreate { + next = append(next, actions.CreatePool(actual.lb.UUID, p)) + } + + // If there have been pool changes, refresh + if len(poolsToDelete) > 0 || len(poolsToCreate) > 0 { + next = append(next, actions.Refetch()) + return next, nil + } + + // Update pool members + actualPools := actual.poolsByName() + actionCount := len(next) + + for _, d := range desired.pools { + a := actualPools[d.Name] + + // This would indicate a programming error above + if a == nil { + return nil, fmt.Errorf("no existing pool found for %s", d.Name) + } + + // Delete and create pool members + msToDelete, msToCreate := compare.Diff( + desired.members[d], + actual.members[a], + poolMemberKey, + ) + + for _, m := range msToDelete { + member := m + delete(member.HREF) + } + + if len(msToDelete) > 0 && len(msToCreate) > 0 { + next = append(next, actions.Sleep(5*time.Second)) + } + + for _, m := range msToCreate { + member := m + next = append(next, actions.CreatePoolMember(a.UUID, &member)) + } + + // Delete and create listeners + lsToDelete, lsToCreate := compare.Diff( + desired.listeners[d], + actual.listeners[a], + listenerKey, + ) + + for _, l := range lsToDelete { + listener := l + delete(listener.HREF) + } + + if len(lsToDelete) > 0 && len(lsToCreate) > 0 { + next = append(next, actions.Sleep(5*time.Second)) + } + + for _, l := range lsToCreate { + listener := l + next = append(next, actions.CreateListener(a.UUID, &listener)) + } + + // Delete and create monitors + monToDelete, monToCreate := compare.Diff( + desired.monitors[d], + actual.monitors[a], + monitorKey, + ) + + for _, m := range monToDelete { + mon := m + delete(mon.HREF) + } + + if len(monToDelete) > 
0 && len(monToCreate) > 0 { + next = append(next, actions.Sleep(5*time.Second)) + } + + for _, m := range monToCreate { + mon := m + next = append(next, actions.CreateHealthMonitor(a.UUID, &mon)) + } + } + + // If there have been member changes, refresh + if actionCount < len(next) { + next = append(next, actions.Refetch()) + return next, nil + } + + // Update the listeners and monitors that do not need to be recreated + for _, d := range desired.pools { + a := actualPools[d.Name] + + listeners := compare.Match( + desired.listeners[d], + actual.listeners[a], + listenerKey, + ) + + for _, match := range listeners { + dl := match[0] + al := match[1] + + if !slices.Equal(dl.AllowedCIDRs, al.AllowedCIDRs) { + next = append(next, actions.UpdateListenerAllowedCIDRs( + al.UUID, + dl.AllowedCIDRs, + )) + } + + if dl.TimeoutClientDataMS != al.TimeoutClientDataMS { + next = append(next, actions.UpdateListenerTimeout( + al.UUID, + dl.TimeoutClientDataMS, + "client-data-ms", + )) + } + + if dl.TimeoutMemberConnectMS != al.TimeoutMemberConnectMS { + next = append(next, actions.UpdateListenerTimeout( + al.UUID, + dl.TimeoutMemberConnectMS, + "member-connect-ms", + )) + } + + if dl.TimeoutMemberDataMS != al.TimeoutMemberDataMS { + next = append(next, actions.UpdateListenerTimeout( + al.UUID, + dl.TimeoutMemberDataMS, + "member-data-ms", + )) + } + } + + monitors := compare.Match( + desired.monitors[d], + actual.monitors[a], + monitorKey, + ) + + for _, match := range monitors { + dm := match[0] + am := match[1] + + if dm.HTTP != nil && am.HTTP != nil { + if dm.HTTP.Host != am.HTTP.Host { + next = append(next, actions.UpdateMonitorHTTPHost( + am.UUID, + dm.HTTP.Host, + )) + } + + if dm.HTTP.UrlPath != am.HTTP.UrlPath { + next = append(next, actions.UpdateMonitorHTTPPath( + am.UUID, + dm.HTTP.UrlPath, + )) + } + + if dm.HTTP.Method != am.HTTP.Method { + next = append(next, actions.UpdateMonitorHTTPMethod( + am.UUID, + dm.HTTP.Method, + )) + } + + if !slices.Equal( + 
dm.HTTP.ExpectedCodes, am.HTTP.ExpectedCodes) { + + next = append(next, actions.UpdateMonitorHTTPExpectedCodes( + am.UUID, + dm.HTTP.ExpectedCodes, + )) + } + } + + if dm.DelayS != am.DelayS { + next = append(next, actions.UpdateMonitorNumber( + am.UUID, + dm.DelayS, + "delay-s", + )) + } + + if dm.TimeoutS != am.TimeoutS { + next = append(next, actions.UpdateMonitorNumber( + am.UUID, + dm.TimeoutS, + "timeout-s", + )) + } + + if dm.UpThreshold != am.UpThreshold { + next = append(next, actions.UpdateMonitorNumber( + am.UUID, + dm.UpThreshold, + "up-threshold", + )) + } + + if dm.DownThreshold != am.DownThreshold { + next = append(next, actions.UpdateMonitorNumber( + am.UUID, + dm.DownThreshold, + "down-threshold", + )) + } + } + } + + return next, nil +} + +// reconcileLbState reconciles an actual load balancer state with a desired +// one. During reconciliation, the state may have to be re-fetche, which is why +// functions are used. They are expected not to cache their results. +func reconcileLbState( + ctx context.Context, + client *cloudscale.Client, + desiredState func() (*lbState, error), + actualState func() (*lbState, error), +) error { + + for { + // Get the states + desired, err := desiredState() + if err != nil { + return err + } + + actual, err := actualState() + if err != nil { + return err + } + + // Get the actions necessary to get to the desired state + next, err := nextLbActions(desired, actual) + if err != nil { + return err + } + + updateState, err := runActions(ctx, client, next) + if err != nil { + return err + } + + if !updateState { + break + } + + // Wait between 5-7.5 seconds between state fetches + wait := time.Duration(5000+rand.Intn(2500)) * time.Millisecond + + select { + case <-ctx.Done(): + return fmt.Errorf("action has been aborted") + case <-time.After(wait): + continue + } + } + + return nil +} + +// runActions executes the given actions and returns the result, together +// with a boolean set to true, if additional actions are 
necessary. +func runActions( + ctx context.Context, + client *cloudscale.Client, + next []actions.Action, +) (bool, error) { + + for _, action := range next { + + // Abort the actions if the context has been cancelled, to avoid + // noop-ing a bunch of individual function calls. + if ctx.Err() != nil { + return false, fmt.Errorf( + "aborted action run, cancelled: %w", ctx.Err()) + } + + // Execute action and log it + klog.InfoS("executing action", "label", action.Label()) + control, err := action.Run(ctx, client) + + switch { + case err != nil: + return false, fmt.Errorf( + "error during %s: %w", action.Label(), err) + case control == actions.Refresh: + return true, nil + case control == actions.Proceed: + continue + case control == actions.Errored: + return false, fmt.Errorf("action errored but provided no error") + default: + return false, fmt.Errorf("unknown control code: %d", control) + } + } + + return false, nil +} + +// listenerForPort returns a desired listener for the given port, taking the +// annotations into consideration. 
+func listenerForPort( + serviceInfo *serviceInfo, + port int, +) (*cloudscale.LoadBalancerListener, error) { + + var ( + listener = cloudscale.LoadBalancerListener{} + err error + ) + + listener.Protocol = serviceInfo.annotation(LoadBalancerListenerProtocol) + listener.ProtocolPort = port + listener.Name = listenerName(listener.Protocol, listener.ProtocolPort) + + listener.TimeoutClientDataMS, err = serviceInfo.annotationInt( + LoadBalancerListenerTimeoutClientDataMS) + if err != nil { + return nil, err + } + + listener.TimeoutMemberConnectMS, err = serviceInfo.annotationInt( + LoadBalancerListenerTimeoutMemberConnectMS) + if err != nil { + return nil, err + } + + listener.TimeoutMemberDataMS, err = serviceInfo.annotationInt( + LoadBalancerListenerTimeoutMemberDataMS) + if err != nil { + return nil, err + } + + listener.AllowedCIDRs, err = serviceInfo.annotationList( + LoadBalancerListenerAllowedCIDRs) + if err != nil { + return nil, err + } + + return &listener, nil +} + +// healthMonitorForPort returns a health monitor for any pool used by the +// given service, taking the annotations into consideration. 
+func healthMonitorForPort( + serviceInfo *serviceInfo) (*cloudscale.LoadBalancerHealthMonitor, error) { + + var ( + monitor = cloudscale.LoadBalancerHealthMonitor{} + err error + ) + + monitor.Type = serviceInfo.annotation(LoadBalancerHealthMonitorType) + + monitor.DelayS, err = serviceInfo.annotationInt( + LoadBalancerHealthMonitorDelayS) + if err != nil { + return nil, err + } + + monitor.TimeoutS, err = serviceInfo.annotationInt( + LoadBalancerHealthMonitorTimeoutS) + if err != nil { + return nil, err + } + + monitor.UpThreshold, err = serviceInfo.annotationInt( + LoadBalancerHealthMonitorUpThreshold) + if err != nil { + return nil, err + } + + monitor.DownThreshold, err = serviceInfo.annotationInt( + LoadBalancerHealthMonitorDownThreshold) + if err != nil { + return nil, err + } + + http := serviceInfo.annotation(LoadBalancerHealthMonitorHTTP) + if http != "{}" { + err = json.Unmarshal([]byte(http), &monitor.HTTP) + if err != nil { + return nil, fmt.Errorf( + "invalid json in %s: %w", + LoadBalancerHealthMonitorHTTP, + err, + ) + } + + // Make sure to fill out defaults for later comparison (the actual + // monitor will have these defaults filled out) + if monitor.HTTP.Method == "" { + monitor.HTTP.Method = "GET" + } + + if monitor.HTTP.UrlPath == "" { + monitor.HTTP.UrlPath = "/" + } + + if len(monitor.HTTP.ExpectedCodes) == 0 { + monitor.HTTP.ExpectedCodes = []string{"200"} + } + + if monitor.HTTP.Version == "" { + monitor.HTTP.Version = "1.1" + } + } + + if serviceInfo.Service.Spec.ExternalTrafficPolicy == "Local" { + monitor.Type = "http" + + // Users may override the http monitor options in this case, but + // if they are not careful, it will lead to timeouts. Overriding + // the user would be an option, but this is left in as an escape-hatch + // for special configurations that need to use their own HTTP options + // with this policy. In most cases, the default should suffice. 
+ if http != "{}" { + klog.Warning( + "not configuring /livez http options required for", + "spec.externalTrafficPolicy=\"Local\", due to annotation", + LoadBalancerHealthMonitorHTTP, + ) + } else { + monitor.HTTP = &cloudscale.LoadBalancerHealthMonitorHTTP{ + UrlPath: "/livez", + Version: "1.0", + Host: nil, + Method: "GET", + ExpectedCodes: []string{"200"}, + } + } + } + + return &monitor, nil +} + +// poolsByName returns the pools found in the state, keyed by name +func (l *lbState) poolsByName() map[string]*cloudscale.LoadBalancerPool { + pools := make(map[string]*cloudscale.LoadBalancerPool, len(l.pools)) + for _, p := range l.pools { + pools[p.Name] = p + } + return pools +} + +// poolName produces the name of the pool for the given service port (the port +// that is bound on the load balancer and reachable from outside of it). +// +// Warning: This named is used to compare desired pools to actual pools. +// Any change to it causes pools to be rebuilt, which must be avoided! +func poolName(protocol v1.Protocol, port int32) string { + return strings.ToLower(fmt.Sprintf("%s/%d", protocol, port)) +} + +// poolMemberName produces the name of the pool member for the given node +// and port. This refers to the socket bound on the node, which receives +// traffic from the loadbalancer. +// +// Warning: This named is used to compare desired members to actual members. +// Any change to it causes members to be rebuilt, which must be avoided! +func poolMemberName(address string, port int) string { + + // Use canonical IPv6 formatting + if strings.Contains(address, ":") { + address = fmt.Sprintf("[%s]", address) + } + + return fmt.Sprintf("%s:%d", address, port) +} + +// listenerName produces the name of the listener for the given protocol +// and port. This is similar to the pool name, but here we use values that +// cloudscale API handles, not Kubernetes. +// +// Warning: This named is used to compare desired listeners to actual +// listeners. 
Any change to it causes listeners to be rebuilt, which must be
testkit.NewNode("foo").V1(), + testkit.NewNode("bar").V1(), + } + + servers := []cloudscale.Server{ + {Name: "foo", ZonalResource: cloudscale.ZonalResource{ + Zone: cloudscale.Zone{Slug: "lpg1"}, + }}, + {Name: "bar", ZonalResource: cloudscale.ZonalResource{ + Zone: cloudscale.Zone{Slug: "rma1"}, + }}, + } + + // Nodes are in different zones, so it's unclear where to put the lb + _, err := desiredLbState(i, nodes, servers) + assert.Error(t, err) + + // Once a zone is given, it is clear + s.Annotations = make(map[string]string) + s.Annotations[LoadBalancerZone] = "rma1" + + state, err := desiredLbState(i, nodes, servers) + assert.NoError(t, err) + assert.Equal(t, "rma1", state.lb.Zone.Slug) +} + +func TestDesiredService(t *testing.T) { + s := testkit.NewService("service").V1() + i := newServiceInfo(s, "") + + nodes := []*v1.Node{ + testkit.NewNode("worker-1").V1(), + testkit.NewNode("worker-2").V1(), + } + + servers := []cloudscale.Server{ + { + Name: "worker-1", + ZonalResource: cloudscale.ZonalResource{ + Zone: cloudscale.Zone{Slug: "rma1"}, + }, + Interfaces: []cloudscale.Interface{{ + Addresses: []cloudscale.Address{{ + Address: "10.0.0.1", + Subnet: cloudscale.SubnetStub{ + UUID: "00000000-0000-0000-0000-000000000000", + }, + }}, + }}, + }, + { + Name: "worker-2", + ZonalResource: cloudscale.ZonalResource{ + Zone: cloudscale.Zone{Slug: "rma1"}, + }, + Interfaces: []cloudscale.Interface{{ + Addresses: []cloudscale.Address{{ + Address: "10.0.0.2", + Subnet: cloudscale.SubnetStub{ + UUID: "00000000-0000-0000-0000-000000000000", + }, + }}, + }}, + }, + } + + s.Spec.Ports = []v1.ServicePort{ + { + Protocol: "TCP", + Port: 80, + NodePort: 8080, + }, + { + Protocol: "TCP", + Port: 443, + NodePort: 8443, + }, + } + + desired, err := desiredLbState(i, nodes, servers) + assert.NoError(t, err) + + // Ensure the lb exists + assert.Equal(t, "lb-standard", desired.lb.Flavor.Slug) + assert.Len(t, desired.lb.VIPAddresses, 0) + + // Have one pool per service port + 
assert.Len(t, desired.pools, 2) + assert.Equal(t, desired.pools[0].Name, "tcp/80") + assert.Equal(t, desired.pools[0].Protocol, "tcp") + assert.Equal(t, desired.pools[0].Algorithm, "round_robin") + assert.Equal(t, desired.pools[1].Name, "tcp/443") + assert.Equal(t, desired.pools[0].Protocol, "tcp") + assert.Equal(t, desired.pools[0].Algorithm, "round_robin") + + // One member per server + for _, pool := range desired.pools { + members := desired.members[pool] + assert.Len(t, members, 2) + + assert.Equal(t, "10.0.0.1", members[0].Address) + assert.Equal(t, "10.0.0.2", members[1].Address) + + assert.True(t, + members[0].ProtocolPort == 8443 || + members[0].ProtocolPort == 8080) + assert.True(t, + members[1].ProtocolPort == 8443 || + members[1].ProtocolPort == 8080) + } + + // One listener per pool + for _, pool := range desired.pools { + listeners := desired.listeners[pool] + assert.Len(t, listeners, 1) + + assert.Equal(t, "tcp", listeners[0].Protocol) + assert.True(t, + listeners[0].ProtocolPort == 80 || + listeners[0].ProtocolPort == 443) + } + + // One health monitor per pool + for _, pool := range desired.pools { + monitors := desired.monitors[pool] + assert.Len(t, monitors, 1) + assert.Equal(t, "tcp", monitors[0].Type) + } +} + +func TestActualState(t *testing.T) { + server := testkit.NewMockAPIServer() + server.WithLoadBalancers([]cloudscale.LoadBalancer{ + { + UUID: "00000000-0000-0000-0000-000000000000", + Name: "k8test-service-test", + }, + }) + server.On("/v1/load-balancers/pools", 200, []cloudscale.LoadBalancerPool{ + { + Name: "tcp/80", + UUID: "00000000-0000-0000-0000-000000000001", + LoadBalancer: cloudscale.LoadBalancerStub{ + UUID: "00000000-0000-0000-0000-000000000000", + }, + }, + }) + server.On("/v1/load-balancers/pools/00000000-0000-0000-0000-000000000001"+ + "/members", 200, []cloudscale.LoadBalancerPoolMember{ + { + Name: "10.0.0.1:8080", + Pool: cloudscale.LoadBalancerPoolStub{ + UUID: "00000000-0000-0000-0000-000000000001", + }, + }, + }) + 
server.On("/v1/load-balancers/listeners", 200, + []cloudscale.LoadBalancerListener{ + { + Name: "tcp/80", + Pool: &cloudscale.LoadBalancerPoolStub{ + UUID: "00000000-0000-0000-0000-000000000001", + }, + }, + }, + ) + server.On("/v1/load-balancers/health-monitors", 200, + []cloudscale.LoadBalancerHealthMonitor{ + { + Type: "tcp", + Pool: cloudscale.LoadBalancerPoolStub{ + UUID: "00000000-0000-0000-0000-000000000001", + }, + }, + }, + ) + server.Start() + defer server.Close() + + mapper := lbMapper{client: server.Client()} + + s := testkit.NewService("service").V1() + s.Annotations = make(map[string]string) + s.Annotations[LoadBalancerUUID] = "00000000-0000-0000-0000-000000000000" + + i := newServiceInfo(s, "") + + actual, err := actualLbState(context.Background(), &mapper, i) + assert.NoError(t, err) + + assert.Equal(t, "k8test-service-test", actual.lb.Name) + assert.Len(t, actual.pools, 1) + assert.Len(t, actual.members, 1) + assert.Len(t, actual.listeners, 1) + assert.Len(t, actual.monitors, 1) + + p := actual.pools[0] + assert.Equal(t, "tcp/80", p.Name) + assert.Equal(t, "10.0.0.1:8080", actual.members[p][0].Name) + assert.Equal(t, "tcp/80", actual.listeners[p][0].Name) + assert.Equal(t, "tcp", actual.monitors[p][0].Type) +} + +func TestNextLbActionsInvalidCalls(t *testing.T) { + assertError := func(d *lbState, a *lbState) { + _, err := nextLbActions(d, a) + assert.Error(t, err) + } + + assertError(nil, nil) + assertError(nil, &lbState{}) + assertError(&lbState{}, nil) +} + +// TestNextLbProhibitDangerousChanges ensures that changes that would cause +// a loadbalancer to be recreated (potentially losing its automatically +// assigned IP addresss) are prohibited. +// +// Such actions can still be done by manually recreating the service in +// Kubernetes. Some actions can be supported in the future, but this should +// be done cautiously. +// +// Changes "inside" the load balancer can be done more aggressively, as all +// states can be recreated. 
We cannot however, regain a previously IP address, +// assigned automatically. +func TestNextLbProhibitDangerousChanges(t *testing.T) { + assertError := func(d *lbState, a *lbState) { + _, err := nextLbActions(d, a) + assert.Error(t, err) + } + + // No automatic change of flavor (to be implemented in the future) + one := &cloudscale.LoadBalancer{ + Name: "foo", + Flavor: cloudscale.LoadBalancerFlavorStub{Slug: "lb-standard"}, + } + two := &cloudscale.LoadBalancer{ + Name: "bar", + Flavor: cloudscale.LoadBalancerFlavorStub{Slug: "lb-large"}, + } + + assertError(&lbState{lb: one}, &lbState{lb: two}) + + // No automatic change of VIP addresses + one = &cloudscale.LoadBalancer{ + Name: "foo", + VIPAddresses: []cloudscale.VIPAddress{ + {Address: "10.0.0.1"}, + }, + } + two = &cloudscale.LoadBalancer{ + Name: "bar", + VIPAddresses: []cloudscale.VIPAddress{ + {Address: "10.0.0.2"}, + }, + } + + assertError(&lbState{lb: one}, &lbState{lb: two}) + + // No automatic change of zone + one = &cloudscale.LoadBalancer{ + Name: "foo", + ZonalResource: cloudscale.ZonalResource{ + Zone: cloudscale.Zone{Slug: "lpg1"}, + }, + } + two = &cloudscale.LoadBalancer{ + Name: "bar", + ZonalResource: cloudscale.ZonalResource{ + Zone: cloudscale.Zone{Slug: "rma1"}, + }, + } + + assertError(&lbState{lb: one}, &lbState{lb: two}) +} + +func TestNextLbActions(t *testing.T) { + + assertActions := func(d *lbState, a *lbState, expected []actions.Action) { + actions, err := nextLbActions(d, a) + assert.NoError(t, err) + assert.Equal(t, expected, actions) + } + + lb := &cloudscale.LoadBalancer{ + HREF: "foo", + } + + // Noop + assertActions(&lbState{}, &lbState{}, []actions.Action{}) + + // The await action is always there, to ensure we are not working on + // an LB that cannot be updated. 
+ lb.Status = "changing" + assertActions(&lbState{lb: lb}, &lbState{lb: lb}, []actions.Action{ + actions.AwaitLb(lb), + }) + + lb.Status = "ready" + assertActions(&lbState{lb: lb}, &lbState{lb: lb}, []actions.Action{ + actions.AwaitLb(lb), + }) + + // Delete lb if not desired + assertActions(&lbState{}, &lbState{lb: lb}, []actions.Action{ + actions.AwaitLb(lb), + actions.DeleteResource("foo"), + }) + + // Create lb if desired + assertActions(&lbState{lb: lb}, &lbState{}, []actions.Action{ + actions.CreateLb(lb), + actions.Refetch(), + }) + + // Rename lb if name changed. This is safe because the lbs have either + // been acquired by name (in which case both will have the same name), + // or by UUID through the service annotation. + one := &cloudscale.LoadBalancer{ + Name: "foo", + } + two := &cloudscale.LoadBalancer{ + UUID: "2", + Name: "bar", + } + assertActions(&lbState{lb: one}, &lbState{lb: two}, []actions.Action{ + actions.AwaitLb(two), + actions.RenameLb("2", "bar"), + }) +} + +func TestNextPoolActions(t *testing.T) { + + assertActions := func(d *lbState, a *lbState, expected []actions.Action) { + actions, err := nextLbActions(d, a) + assert.NoError(t, err) + assert.Equal(t, expected, actions) + } + + lb := &cloudscale.LoadBalancer{ + UUID: "foo", + HREF: "foo", + } + + // No change in pools + desired := []*cloudscale.LoadBalancerPool{ + {HREF: "tcp/80", Name: "tcp/80", Algorithm: "round_robin"}, + } + actual := []*cloudscale.LoadBalancerPool{ + {HREF: "tcp/80", Name: "tcp/80", Algorithm: "round_robin"}, + } + + assertActions( + &lbState{lb: lb, pools: desired}, + &lbState{lb: lb, pools: actual}, + []actions.Action{ + actions.AwaitLb(lb), + }, + ) + + // Delete pools that are not wanted + desired = []*cloudscale.LoadBalancerPool{ + {HREF: "tcp/80", Name: "tcp/80", Algorithm: "round_robin"}, + } + actual = []*cloudscale.LoadBalancerPool{ + {HREF: "tcp/80", Name: "tcp/80", Algorithm: "round_robin"}, + {HREF: "tcp/443", Name: "tcp/443", Algorithm: 
"round_robin"}, + } + + assertActions( + &lbState{lb: lb, pools: desired}, + &lbState{lb: lb, pools: actual}, + []actions.Action{ + actions.AwaitLb(lb), + actions.DeleteResource("tcp/443"), + actions.Sleep(500 * time.Millisecond), + actions.Refetch(), + }, + ) + + // Create pools that do not exist + desired = []*cloudscale.LoadBalancerPool{ + {HREF: "tcp/80", Name: "tcp/80", Algorithm: "round_robin"}, + } + assertActions( + &lbState{lb: lb, pools: desired}, + &lbState{lb: lb}, + []actions.Action{ + actions.AwaitLb(lb), + actions.CreatePool("foo", desired[0]), + actions.Refetch(), + }, + ) + + // Delete pools that do not match + desired = []*cloudscale.LoadBalancerPool{ + {HREF: "tcp/80", Name: "tcp/80", Algorithm: "round_robin"}, + {HREF: "tcp/443", Name: "tcp/443", Algorithm: "round_robin"}, + } + actual = []*cloudscale.LoadBalancerPool{ + {HREF: "tcp/80", Name: "tcp/80", Algorithm: "round_robin"}, + {HREF: "tcp/4433", Name: "tcp/4433", Algorithm: "round_robin"}, + } + + assertActions( + &lbState{lb: lb, pools: desired}, + &lbState{lb: lb, pools: actual}, + []actions.Action{ + actions.AwaitLb(lb), + actions.DeleteResource("tcp/4433"), + actions.Sleep(500 * time.Millisecond), + actions.CreatePool("foo", desired[1]), + actions.Refetch(), + }, + ) + + // Recreate pools if details change + desired = []*cloudscale.LoadBalancerPool{ + {HREF: "tcp/80", Name: "tcp/80", Algorithm: "source_ip"}, + } + actual = []*cloudscale.LoadBalancerPool{ + {HREF: "tcp/80", Name: "tcp/80", Algorithm: "round_robin"}, + } + + assertActions( + &lbState{lb: lb, pools: desired}, + &lbState{lb: lb, pools: actual}, + []actions.Action{ + actions.AwaitLb(lb), + actions.DeleteResource("tcp/80"), + actions.Sleep(500 * time.Millisecond), + actions.CreatePool("foo", desired[0]), + actions.Refetch(), + }, + ) +} + +func TestNextPoolMemberActions(t *testing.T) { + + assertActions := func(d *lbState, a *lbState, expected []actions.Action) { + actions, err := nextLbActions(d, a) + assert.NoError(t, err) 
+ assert.Equal(t, expected, actions) + } + + lb := &cloudscale.LoadBalancer{ + UUID: "foo", + HREF: "foo", + } + + desired := newLbState(lb) + + desired.pools = []*cloudscale.LoadBalancerPool{ + {UUID: "1", HREF: "tcp/80", Name: "tcp/80", Algorithm: "round_robin"}, + } + + actual := newLbState(desired.lb) + actual.pools = desired.pools + + pool := desired.pools[0] + + // Create pool members + desired.members[pool] = []cloudscale.LoadBalancerPoolMember{ + {Address: "10.0.0.1", ProtocolPort: 10000}, + } + actual.members[pool] = []cloudscale.LoadBalancerPoolMember{} + + assertActions(desired, actual, []actions.Action{ + actions.AwaitLb(lb), + actions.CreatePoolMember("1", &desired.members[pool][0]), + actions.Refetch(), + }) + + // Delete pool members + desired.members[pool] = []cloudscale.LoadBalancerPoolMember{} + actual.members[pool] = []cloudscale.LoadBalancerPoolMember{ + {HREF: "10.0.0.1:10000", Address: "10.0.0.1", ProtocolPort: 10000}, + } + + assertActions(desired, actual, []actions.Action{ + actions.AwaitLb(lb), + actions.DeleteResource("10.0.0.1:10000"), + actions.Sleep(500 * time.Millisecond), + actions.Refetch(), + }) + + // Recreate pool members + desired.members[pool] = []cloudscale.LoadBalancerPoolMember{ + {Address: "10.0.0.1", ProtocolPort: 2}, + } + actual.members[pool] = []cloudscale.LoadBalancerPoolMember{ + {HREF: "actual", Address: "10.0.0.1", ProtocolPort: 1}, + } + + assertActions(desired, actual, []actions.Action{ + actions.AwaitLb(lb), + actions.DeleteResource("actual"), + actions.Sleep(500 * time.Millisecond), + actions.Sleep(5000 * time.Millisecond), + actions.CreatePoolMember("1", &desired.members[pool][0]), + actions.Refetch(), + }) +} + +func TestNextListenerActions(t *testing.T) { + + assertActions := func(d *lbState, a *lbState, expected []actions.Action) { + actions, err := nextLbActions(d, a) + assert.NoError(t, err) + assert.Equal(t, expected, actions) + } + + lb := &cloudscale.LoadBalancer{ + UUID: "foo", + HREF: "foo", + } + + 
desired := newLbState(lb) + + desired.pools = []*cloudscale.LoadBalancerPool{ + {UUID: "1", HREF: "tcp/80", Name: "tcp/80", Algorithm: "round_robin"}, + } + + actual := newLbState(desired.lb) + actual.pools = desired.pools + + pool := desired.pools[0] + + // Create listeners + desired.listeners[pool] = []cloudscale.LoadBalancerListener{ + {Name: "tcp/80", ProtocolPort: 80}, + } + actual.listeners[pool] = []cloudscale.LoadBalancerListener{} + + assertActions(desired, actual, []actions.Action{ + actions.AwaitLb(lb), + actions.CreateListener("1", &desired.listeners[pool][0]), + actions.Refetch(), + }) + + // Delete listeners + desired.listeners[pool] = []cloudscale.LoadBalancerListener{} + actual.listeners[pool] = []cloudscale.LoadBalancerListener{ + {HREF: "tcp/80", Name: "tcp/80", ProtocolPort: 80}, + } + + assertActions(desired, actual, []actions.Action{ + actions.AwaitLb(lb), + actions.DeleteResource("tcp/80"), + actions.Sleep(500 * time.Millisecond), + actions.Refetch(), + }) + + // Recreate listeners + desired.listeners[pool] = []cloudscale.LoadBalancerListener{ + {HREF: "80", Name: "80", Protocol: "tcp"}, + } + actual.listeners[pool] = []cloudscale.LoadBalancerListener{ + {HREF: "80", Name: "80", Protocol: "udp"}, + } + + assertActions(desired, actual, []actions.Action{ + actions.AwaitLb(lb), + actions.DeleteResource("80"), + actions.Sleep(500 * time.Millisecond), + actions.Sleep(5000 * time.Millisecond), + actions.CreateListener("1", &desired.listeners[pool][0]), + actions.Refetch(), + }) + + // Update allowed CIDRs + desired.listeners[pool] = []cloudscale.LoadBalancerListener{ + {UUID: "1", HREF: "tcp/80", Name: "tcp/80", ProtocolPort: 80, + AllowedCIDRs: []string{"7.0.0.0/8"}}, + } + actual.listeners[pool] = []cloudscale.LoadBalancerListener{ + {UUID: "1", HREF: "tcp/80", Name: "tcp/80", ProtocolPort: 80, + AllowedCIDRs: []string{}}, + } + + assertActions(desired, actual, []actions.Action{ + actions.AwaitLb(lb), + actions.UpdateListenerAllowedCIDRs("1", 
[]string{"7.0.0.0/8"}), + }) + + // Update timeouts + desired.listeners[pool] = []cloudscale.LoadBalancerListener{ + {UUID: "1", HREF: "tcp/80", Name: "tcp/80", ProtocolPort: 80, + TimeoutClientDataMS: 1, + TimeoutMemberConnectMS: 2, + TimeoutMemberDataMS: 3, + }, + } + actual.listeners[pool] = []cloudscale.LoadBalancerListener{ + {UUID: "1", HREF: "tcp/80", Name: "tcp/80", ProtocolPort: 80, + TimeoutClientDataMS: 3, + TimeoutMemberConnectMS: 2, + TimeoutMemberDataMS: 1, + }, + } + + assertActions(desired, actual, []actions.Action{ + actions.AwaitLb(lb), + actions.UpdateListenerTimeout("1", 1, "client-data-ms"), + actions.UpdateListenerTimeout("1", 3, "member-data-ms"), + }) + +} + +func TestNextMonitorActions(t *testing.T) { + + assertActions := func(d *lbState, a *lbState, expected []actions.Action) { + actions, err := nextLbActions(d, a) + assert.NoError(t, err) + assert.Equal(t, expected, actions) + } + + lb := &cloudscale.LoadBalancer{ + UUID: "foo", + HREF: "foo", + } + + desired := newLbState(lb) + + desired.pools = []*cloudscale.LoadBalancerPool{ + {UUID: "1", HREF: "tcp/80", Name: "tcp/80", Algorithm: "round_robin"}, + } + + actual := newLbState(desired.lb) + actual.pools = desired.pools + + pool := desired.pools[0] + + // Create monitors + desired.monitors[pool] = []cloudscale.LoadBalancerHealthMonitor{ + {Type: "tcp"}, + } + actual.monitors[pool] = []cloudscale.LoadBalancerHealthMonitor{} + + assertActions(desired, actual, []actions.Action{ + actions.AwaitLb(lb), + actions.CreateHealthMonitor("1", &desired.monitors[pool][0]), + actions.Refetch(), + }) + + // Delete monitors + desired.monitors[pool] = []cloudscale.LoadBalancerHealthMonitor{} + actual.monitors[pool] = []cloudscale.LoadBalancerHealthMonitor{ + {HREF: "tcp", Type: "tcp"}, + } + + assertActions(desired, actual, []actions.Action{ + actions.AwaitLb(lb), + actions.DeleteResource("tcp"), + actions.Sleep(500 * time.Millisecond), + actions.Refetch(), + }) + + // Recreate monitors + 
desired.monitors[pool] = []cloudscale.LoadBalancerHealthMonitor{ + {Type: "http"}, + } + actual.monitors[pool] = []cloudscale.LoadBalancerHealthMonitor{ + {HREF: "tcp", Type: "tcp"}, + } + + assertActions(desired, actual, []actions.Action{ + actions.AwaitLb(lb), + actions.DeleteResource("tcp"), + actions.Sleep(500 * time.Millisecond), + actions.Sleep(5000 * time.Millisecond), + actions.CreateHealthMonitor("1", &desired.monitors[pool][0]), + actions.Refetch(), + }) + + // Update http options (no change) + desired.monitors[pool] = []cloudscale.LoadBalancerHealthMonitor{ + {Type: "http", HTTP: &cloudscale.LoadBalancerHealthMonitorHTTP{ + Method: "HEAD", + }}, + } + actual.monitors[pool] = []cloudscale.LoadBalancerHealthMonitor{ + {Type: "http", HTTP: &cloudscale.LoadBalancerHealthMonitorHTTP{ + Method: "HEAD", + }}, + } + + assertActions(desired, actual, []actions.Action{ + actions.AwaitLb(lb), + }) + + // Update http options + desired.monitors[pool] = []cloudscale.LoadBalancerHealthMonitor{ + {HTTP: &cloudscale.LoadBalancerHealthMonitorHTTP{ + Method: "HEAD", + }}, + } + actual.monitors[pool] = []cloudscale.LoadBalancerHealthMonitor{ + {UUID: "1", HTTP: &cloudscale.LoadBalancerHealthMonitorHTTP{ + Method: "GET", + }}, + } + + assertActions(desired, actual, []actions.Action{ + actions.AwaitLb(lb), + actions.UpdateMonitorHTTPMethod("1", "HEAD"), + }) + + // Update monitor numbers + desired.monitors[pool] = []cloudscale.LoadBalancerHealthMonitor{ + { + DelayS: 1, + TimeoutS: 2, + UpThreshold: 3, + DownThreshold: 4, + }, + } + actual.monitors[pool] = []cloudscale.LoadBalancerHealthMonitor{ + { + UUID: "1", + DelayS: 4, + TimeoutS: 3, + UpThreshold: 2, + DownThreshold: 4, + }, + } + + assertActions(desired, actual, []actions.Action{ + actions.AwaitLb(lb), + actions.UpdateMonitorNumber("1", 1, "delay-s"), + actions.UpdateMonitorNumber("1", 2, "timeout-s"), + actions.UpdateMonitorNumber("1", 3, "up-threshold"), + }) +} diff --git a/pkg/cloudscale_ccm/server_mapper.go 
b/pkg/cloudscale_ccm/server_mapper.go index 523fce7..2ef91d9 100644 --- a/pkg/cloudscale_ccm/server_mapper.go +++ b/pkg/cloudscale_ccm/server_mapper.go @@ -5,7 +5,8 @@ import ( "errors" "fmt" - "github.com/cloudscale-ch/cloudscale-go-sdk" + "github.com/cloudscale-ch/cloudscale-cloud-controller-manager/pkg/internal/limiter" + "github.com/cloudscale-ch/cloudscale-go-sdk/v4" v1 "k8s.io/api/core/v1" ) @@ -18,10 +19,10 @@ type serverMapper struct { func (s *serverMapper) findByNode( ctx context.Context, node *v1.Node, -) *limiter[cloudscale.Server] { +) *limiter.Limiter[cloudscale.Server] { if node == nil { - return newLimiter[cloudscale.Server](nil) + return limiter.New[cloudscale.Server](nil) } if node.Spec.ProviderID != "" { @@ -34,7 +35,7 @@ func (s *serverMapper) findByNode( // // See also https://github.com/kubernetes/cloud-provider/issues/3 if err != nil { - return newLimiter[cloudscale.Server](fmt.Errorf( + return limiter.New[cloudscale.Server](fmt.Errorf( "%s is not a valid cloudscale provider id: %w", node.Spec.ProviderID, err, @@ -47,24 +48,45 @@ func (s *serverMapper) findByNode( return s.findByName(ctx, node.Name) } +// mapNodes returns a server for each given node. If a 1:1 mapping across all +// given nodes can be established, an error is returned. +func (s *serverMapper) mapNodes( + ctx context.Context, + nodes []*v1.Node, +) *limiter.Limiter[cloudscale.Server] { + servers := make([]cloudscale.Server, 0, len(nodes)) + + for _, node := range nodes { + server, err := s.findByNode(ctx, node).One() + + if err != nil { + return limiter.New[cloudscale.Server](err) + } + + servers = append(servers, *server) + } + + return limiter.New[cloudscale.Server](nil, servers...) 
+} + // getByProviderID tries to access the server by provider ID (UUID) func (s *serverMapper) getByProviderID( ctx context.Context, id cloudscaleProviderID, -) *limiter[cloudscale.Server] { +) *limiter.Limiter[cloudscale.Server] { server, err := s.client.Servers.Get(ctx, id.UUID().String()) if err != nil { var response *cloudscale.ErrorResponse if errors.As(err, &response) && response.StatusCode == 404 { - return newLimiter[cloudscale.Server](nil) + return limiter.New[cloudscale.Server](nil) } - return newLimiter[cloudscale.Server](err) + return limiter.New[cloudscale.Server](err) } - return newLimiter[cloudscale.Server](nil, *server) + return limiter.New[cloudscale.Server](nil, *server) } // findByName returns servers matching the given name (there may be multiple @@ -72,11 +94,11 @@ func (s *serverMapper) getByProviderID( func (s *serverMapper) findByName( ctx context.Context, name string, -) *limiter[cloudscale.Server] { +) *limiter.Limiter[cloudscale.Server] { servers, err := s.client.Servers.List(ctx) if err != nil { - return newLimiter[cloudscale.Server](err) + return limiter.New[cloudscale.Server](err) } matches := []cloudscale.Server{} @@ -88,11 +110,13 @@ func (s *serverMapper) findByName( } } - return newLimiter[cloudscale.Server](nil, matches...) + return limiter.New[cloudscale.Server](nil, matches...) 
} -// serverNodeAddresses returns a v1.nodeAddresses slice for the metadata -func (s *serverMapper) nodeAddresses(server *cloudscale.Server) []v1.NodeAddress { +// nodeAddresses returns a v1.nodeAddresses slice for the metadata +func (s *serverMapper) nodeAddresses( + server *cloudscale.Server) []v1.NodeAddress { + if server == nil { return []v1.NodeAddress{} } diff --git a/pkg/cloudscale_ccm/server_mapper_test.go b/pkg/cloudscale_ccm/server_mapper_test.go index dac189e..da6f91c 100644 --- a/pkg/cloudscale_ccm/server_mapper_test.go +++ b/pkg/cloudscale_ccm/server_mapper_test.go @@ -5,7 +5,7 @@ import ( "testing" "github.com/cloudscale-ch/cloudscale-cloud-controller-manager/pkg/internal/testkit" - "github.com/cloudscale-ch/cloudscale-go-sdk" + cloudscale "github.com/cloudscale-ch/cloudscale-go-sdk/v4" "github.com/stretchr/testify/assert" v1 "k8s.io/api/core/v1" ) @@ -24,18 +24,18 @@ func TestServerByNode(t *testing.T) { mapper := serverMapper{client: server.Client()} assertMatch := func(name string, node *v1.Node) { - match, err := mapper.findByNode(context.Background(), node).one() + match, err := mapper.findByNode(context.Background(), node).One() assert.NoError(t, err) assert.Equal(t, name, match.Name) } assertMissing := func(node *v1.Node) { - err := mapper.findByNode(context.Background(), node).none() + err := mapper.findByNode(context.Background(), node).None() assert.NoError(t, err) } assertError := func(node *v1.Node) { - _, err := mapper.findByNode(context.Background(), node).one() + _, err := mapper.findByNode(context.Background(), node).One() assert.Error(t, err) } @@ -83,7 +83,7 @@ func TestNoServers(t *testing.T) { mapper := serverMapper{client: server.Client()} assertMissing := func(node *v1.Node) { - match, err := mapper.findByNode(context.Background(), node).atMostOne() + match, err := mapper.findByNode(context.Background(), node).AtMostOne() assert.NoError(t, err) assert.Nil(t, match) } diff --git a/pkg/cloudscale_ccm/service_info.go 
b/pkg/cloudscale_ccm/service_info.go new file mode 100644 index 0000000..5e2e330 --- /dev/null +++ b/pkg/cloudscale_ccm/service_info.go @@ -0,0 +1,177 @@ +package cloudscale_ccm + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" + + v1 "k8s.io/api/core/v1" + + "k8s.io/klog/v2" +) + +// serviceInfo wraps v1.Service with cloudscale specific methods +type serviceInfo struct { + Service *v1.Service + clusterName string +} + +func newServiceInfo(service *v1.Service, clusterName string) *serviceInfo { + + if service == nil { + panic("v1.Service pointer is nil") + } + + return &serviceInfo{Service: service, clusterName: clusterName} +} + +// isSupported checks if the given service is one we care about. If we do +// not, false is returned, with an optional error message to give a hint +// about why we do not support it (may be ignored). +// +// This is due to the fact that Kubernetes might send a service our way, that +// is not handled by us. +func (s serviceInfo) isSupported() (bool, error) { + + // If you specify .spec.loadBalancerClass, it is assumed that a load + // balancer implementation that matches the specified class is watching + // for Services. Any default load balancer implementation (for example, + // the one provided by the cloud provider) will ignore Services that have + // this field set. + // + // https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-class + if s.Service.Spec.LoadBalancerClass != nil { + return false, fmt.Errorf( + "not supported LoadBalancerClass: %s", + *s.Service.Spec.LoadBalancerClass, + ) + } + + return true, nil +} + +// Returns the annotation for the given key (see LoadBalancer...), and will +// default to an empty string, unless some other default is specified. +// +// Warning: These defaults should not be changed going forward, as that would +// cause CCM to apply changes to existing clusters. 
If *really* necessary, +// use the LoadBalancerConfigVersion annotation stored on the service and +// add a new code path that accounts for this version when handing out +// defaults. +// +// Storing of all annotations on the service would be an alternative, but it +// would lead to excessive annotation usage, which should be avoided. +// +// Having a different code path for defaults vs. set values would make +// the code more complicated on the other hand. +// +// Not touching these defaults is therefore the simplest approach. +func (s serviceInfo) annotation(key string) string { + switch key { + case LoadBalancerConfigVersion: + return "1" + case LoadBalancerName: + // Take the load balancer name or generate one + return s.annotationOrElse(key, func() string { + return fmt.Sprintf("k8s-service-%s", s.Service.UID) + }) + case LoadBalancerZone: + return s.annotationOrDefault(key, "") + case LoadBalancerUUID: + return s.annotationOrDefault(key, "") + case LoadBalancerPoolProtocol: + return s.annotationOrDefault(key, "tcp") + case LoadBalancerFlavor: + return s.annotationOrDefault(key, "lb-standard") + case LoadBalancerPoolAlgorithm: + return s.annotationOrDefault(key, "round_robin") + case LoadBalancerHealthMonitorDelayS: + return s.annotationOrDefault(key, "2") + case LoadBalancerHealthMonitorTimeoutS: + return s.annotationOrDefault(key, "1") + case LoadBalancerHealthMonitorUpThreshold: + return s.annotationOrDefault(key, "2") + case LoadBalancerHealthMonitorDownThreshold: + return s.annotationOrDefault(key, "3") + case LoadBalancerHealthMonitorType: + return s.annotationOrDefault(key, "tcp") + case LoadBalancerHealthMonitorHTTP: + return s.annotationOrDefault(key, "{}") + case LoadBalancerListenerProtocol: + return s.annotationOrDefault(key, "tcp") + case LoadBalancerListenerAllowedCIDRs: + return s.annotationOrDefault(key, "[]") + case LoadBalancerListenerTimeoutClientDataMS: + return s.annotationOrDefault(key, "50000") + case 
LoadBalancerListenerTimeoutMemberConnectMS: + return s.annotationOrDefault(key, "5000") + case LoadBalancerListenerTimeoutMemberDataMS: + return s.annotationOrDefault(key, "50000") + default: + return s.annotationOrElse(key, func() string { + klog.Warning("unknown annotation:", key) + return "" + }) + } +} + +// Returns the annotation as int, or an error +func (s serviceInfo) annotationInt(key string) (int, error) { + v, err := strconv.Atoi(s.annotation(key)) + if err != nil { + return 0, fmt.Errorf( + "cannot convert %s to int (%s): %w", + s.annotation(key), + key, + err, + ) + } + return v, nil +} + +// Returns the annotation as string list, or an error. The supported input +// format is JSON (e.g. `["foo", "bar"]`). An empty string is treated as +// an empty list. +func (s serviceInfo) annotationList(key string) ([]string, error) { + value := s.annotation(key) + + if strings.Trim(value, " ") == "" { + return make([]string, 0), nil + } + + var list []string + + err := json.Unmarshal([]byte(value), &list) + if err != nil { + return nil, fmt.Errorf( + "not a valid JSON string list: %s (%s): %w", + value, + key, + err, + ) + } + + return list, nil +} + +// annotationOrElse returns the annotation with the given key, or returns the +// result of the fallback function if the key does not exist. +func (s serviceInfo) annotationOrElse(key string, fn func() string) string { + if s.Service.Annotations == nil { + return fn() + } + + value, ok := s.Service.Annotations[key] + if !ok { + return fn() + } + + return value +} + +// annotationOrDefault returns the annotation with the given key, or the +// default value if the key does not exist.
+func (s serviceInfo) annotationOrDefault(key string, value string) string { + return s.annotationOrElse(key, func() string { return value }) +} diff --git a/pkg/cloudscale_ccm/service_info_test.go b/pkg/cloudscale_ccm/service_info_test.go new file mode 100644 index 0000000..071cd48 --- /dev/null +++ b/pkg/cloudscale_ccm/service_info_test.go @@ -0,0 +1,103 @@ +package cloudscale_ccm + +import ( + "testing" + + "github.com/cloudscale-ch/cloudscale-cloud-controller-manager/pkg/internal/testkit" + "github.com/stretchr/testify/assert" +) + +func TestNewServicePanic(t *testing.T) { + assert.Panics(t, func() { + newServiceInfo(nil, "") + }) +} + +func TestIsSupported(t *testing.T) { + s := testkit.NewService("service").V1() + supported, err := newServiceInfo(s, "").isSupported() + assert.True(t, supported) + assert.NoError(t, err) +} + +func TestIsNotSupported(t *testing.T) { + s := testkit.NewService("service").V1() + + class := "foo" + s.Spec.LoadBalancerClass = &class + + supported, err := newServiceInfo(s, "").isSupported() + assert.False(t, supported) + assert.Error(t, err) +} + +func TestAnnotation(t *testing.T) { + s := testkit.NewService("service").V1() + i := newServiceInfo(s, "") + + assert.Empty(t, i.annotation(LoadBalancerUUID)) + assert.Equal(t, i.annotation(LoadBalancerFlavor), "lb-standard") + assert.Equal(t, i.annotation("foo"), "") + + s.Annotations = make(map[string]string) + + assert.Empty(t, i.annotation(LoadBalancerUUID)) + assert.Equal(t, i.annotation(LoadBalancerFlavor), "lb-standard") + assert.Equal(t, i.annotation("foo"), "") + + s.Annotations[LoadBalancerUUID] = "1234" + s.Annotations[LoadBalancerFlavor] = "strawberry" + + assert.Equal(t, i.annotation(LoadBalancerUUID), "1234") + assert.Equal(t, i.annotation(LoadBalancerFlavor), "strawberry") + assert.Equal(t, i.annotation("foo"), "") +} + +func TestAnnotationInt(t *testing.T) { + s := testkit.NewService("service").V1() + i := newServiceInfo(s, "") + + s.Annotations = make(map[string]string) + 
s.Annotations["foo"] = "1" + s.Annotations["bar"] = "a" + + v, err := i.annotationInt("foo") + assert.Equal(t, v, 1) + assert.NoError(t, err) + + v, err = i.annotationInt("bar") + assert.Equal(t, v, 0) + assert.Error(t, err) + + v, err = i.annotationInt("missing") + assert.Equal(t, v, 0) + assert.Error(t, err) +} + +func TestAnnotationList(t *testing.T) { + s := testkit.NewService("service").V1() + i := newServiceInfo(s, "") + + s.Annotations = make(map[string]string) + s.Annotations["foo"] = "" + s.Annotations["bar"] = "[]" + s.Annotations["baz"] = `["foo", "bar"]` + s.Annotations["qux"] = `["f...` + + v, err := i.annotationList("foo") + assert.Equal(t, v, []string{}) + assert.NoError(t, err) + + v, err = i.annotationList("bar") + assert.Equal(t, v, []string{}) + assert.NoError(t, err) + + v, err = i.annotationList("baz") + assert.Equal(t, v, []string{"foo", "bar"}) + assert.NoError(t, err) + + var empty []string + v, err = i.annotationList("qux") + assert.Equal(t, v, empty) + assert.Error(t, err) +} diff --git a/pkg/internal/actions/actions.go b/pkg/internal/actions/actions.go new file mode 100644 index 0000000..b3cc950 --- /dev/null +++ b/pkg/internal/actions/actions.go @@ -0,0 +1,564 @@ +package actions + +import ( + "context" + "fmt" + "net/http" + "strings" + "time" + + "github.com/cloudscale-ch/cloudscale-go-sdk/v4" +) + +type Action interface { + Label() string + Run(ctx context.Context, client *cloudscale.Client) (Control, error) +} + +// RefetchAction is an empty action that sends a `Refresh` control code +type RefetchAction struct{} + +func Refetch() Action { + return &RefetchAction{} +} + +func (a *RefetchAction) Label() string { + return "refetch" +} + +func (a *RefetchAction) Run( + ctx context.Context, client *cloudscale.Client) (Control, error) { + + return Refresh, nil +} + +// CreateLbAction allows to create a load balancer that does not exist yet, +// using a fully speced load balancer instance. 
+type CreateLbAction struct { + lb *cloudscale.LoadBalancer +} + +func CreateLb(lb *cloudscale.LoadBalancer) Action { + return &CreateLbAction{lb: lb} +} + +func (a *CreateLbAction) Label() string { + return fmt.Sprintf("create-lb(%s)", a.lb.Name) +} + +func (a *CreateLbAction) Run( + ctx context.Context, client *cloudscale.Client) (Control, error) { + + addrs := make([]cloudscale.VIPAddressRequest, 0, len(a.lb.VIPAddresses)) + for _, addr := range a.lb.VIPAddresses { + addrs = append(addrs, cloudscale.VIPAddressRequest{ + Address: addr.Address, + Subnet: addr.Subnet.CIDR, + }) + } + + _, err := client.LoadBalancers.Create(ctx, &cloudscale.LoadBalancerRequest{ + Name: a.lb.Name, + Flavor: a.lb.Flavor.Slug, + VIPAddresses: &addrs, + ZonalResourceRequest: cloudscale.ZonalResourceRequest{ + Zone: a.lb.Zone.Slug, + }, + }) + + return ProceedOnSuccess(err) +} + +// RenameLbAction allows to rename a load balancer via UUID +type RenameLbAction struct { + UUID string + Name string +} + +func RenameLb(uuid string, name string) Action { + return &RenameLbAction{UUID: uuid, Name: name} +} + +func (a *RenameLbAction) Label() string { + return fmt.Sprintf("rename-lb(%s -> %s)", a.UUID, a.Name) +} + +func (a *RenameLbAction) Run( + ctx context.Context, client *cloudscale.Client) (Control, error) { + + return ProceedOnSuccess(client.LoadBalancers.Update(ctx, a.UUID, + &cloudscale.LoadBalancerRequest{ + Name: a.Name, + }, + )) +} + +// AwaitLbAction waits for a load balancer to be ready +type AwaitLbAction struct { + lb *cloudscale.LoadBalancer +} + +func AwaitLb(lb *cloudscale.LoadBalancer) Action { + return &AwaitLbAction{lb: lb} +} + +func (a *AwaitLbAction) Label() string { + return fmt.Sprintf( + "await-lb(%s is %s)", a.lb.Name, a.lb.Status) +} + +func (a *AwaitLbAction) Run( + ctx context.Context, client *cloudscale.Client) (Control, error) { + + // Abort if there are states we cannot continue with + switch a.lb.Status { + case "changing": + return Refresh, nil + default: + 
 return Proceed, nil + } +} + +// DeleteResourceAction deletes the given resource +type DeleteResourceAction struct { + url string +} + +func DeleteResource(url string) Action { + return &DeleteResourceAction{url: url} +} + +func (a *DeleteResourceAction) Label() string { + return fmt.Sprintf("delete-resource(%s)", a.url) +} + +func (a *DeleteResourceAction) Run( + ctx context.Context, client *cloudscale.Client) (Control, error) { + + req, err := client.NewRequest(ctx, http.MethodDelete, a.url, nil) + if err != nil { + return Errored, fmt.Errorf( + "delete resource action for %s failed: %w", a.url, err) + } + + return ProceedOnSuccess(client.Do(ctx, req, nil)) +} + +// SleepAction sleeps for a given amount of time, unless cancelled +type SleepAction struct { + duration time.Duration +} + +func Sleep(duration time.Duration) Action { + return &SleepAction{duration: duration} +} + +func (a *SleepAction) Label() string { + return fmt.Sprintf("sleep-%s", a.duration) +} + +func (a *SleepAction) Run( + ctx context.Context, client *cloudscale.Client) (Control, error) { + select { + case <-ctx.Done(): + return Errored, fmt.Errorf("action has been aborted") + case <-time.After(a.duration): + break + } + + return Proceed, nil +} + +// CreatePoolAction creates a pool +type CreatePoolAction struct { + lbUUID string + pool *cloudscale.LoadBalancerPool +} + +func CreatePool(lbUUID string, pool *cloudscale.LoadBalancerPool) Action { + return &CreatePoolAction{lbUUID: lbUUID, pool: pool} +} + +func (a *CreatePoolAction) Label() string { + return fmt.Sprintf("create-pool(%s/%s)", a.lbUUID, a.pool.Name) +} + +func (a *CreatePoolAction) Run( + ctx context.Context, client *cloudscale.Client) (Control, error) { + + _, err := client.LoadBalancerPools.Create(ctx, + &cloudscale.LoadBalancerPoolRequest{ + Name: a.pool.Name, + LoadBalancer: a.lbUUID, + Algorithm: a.pool.Algorithm, + Protocol: a.pool.Protocol, + }, + ) + + return ProceedOnSuccess(err) +} + +// CreatePoolMemberAction creates a
pool member +type CreatePoolMemberAction struct { + poolUUID string + member *cloudscale.LoadBalancerPoolMember +} + +func CreatePoolMember( + poolUUID string, member *cloudscale.LoadBalancerPoolMember) Action { + + return &CreatePoolMemberAction{poolUUID: poolUUID, member: member} +} + +func (a *CreatePoolMemberAction) Label() string { + return fmt.Sprintf("create-pool-member(%s/%s)", a.poolUUID, a.member.Name) +} + +func (a *CreatePoolMemberAction) Run( + ctx context.Context, client *cloudscale.Client) (Control, error) { + + _, err := client.LoadBalancerPoolMembers.Create(ctx, a.poolUUID, + &cloudscale.LoadBalancerPoolMemberRequest{ + Name: a.member.Name, + ProtocolPort: a.member.ProtocolPort, + MonitorPort: a.member.MonitorPort, + Address: a.member.Address, + Subnet: a.member.Subnet.UUID, + }, + ) + + return ProceedOnSuccess(err) +} + +// CreateListenerAction creates a listener +type CreateListenerAction struct { + poolUUID string + listener *cloudscale.LoadBalancerListener +} + +func CreateListener( + poolUUID string, listener *cloudscale.LoadBalancerListener) Action { + + return &CreateListenerAction{poolUUID: poolUUID, listener: listener} +} + +func (a *CreateListenerAction) Label() string { + return fmt.Sprintf("create-listener(%s/%s)", a.poolUUID, a.listener.Name) +} + +func (a *CreateListenerAction) Run( + ctx context.Context, client *cloudscale.Client) (Control, error) { + + _, err := client.LoadBalancerListeners.Create(ctx, + &cloudscale.LoadBalancerListenerRequest{ + Pool: a.poolUUID, + Name: a.listener.Name, + Protocol: a.listener.Protocol, + ProtocolPort: a.listener.ProtocolPort, + AllowedCIDRs: a.listener.AllowedCIDRs, + TimeoutClientDataMS: a.listener.TimeoutClientDataMS, + TimeoutMemberConnectMS: a.listener.TimeoutMemberConnectMS, + TimeoutMemberDataMS: a.listener.TimeoutMemberDataMS, + }, + ) + + return ProceedOnSuccess(err) +} + +// UpdateListenerAllowedCIDRsAction updates a listener's allowed CIDRs property +type UpdateListenerAllowedCIDRsAction 
struct { + listenerUUID string + allowedCIDRs []string +} + +func UpdateListenerAllowedCIDRs( + listenerUUID string, allowedCIDRs []string) Action { + + return &UpdateListenerAllowedCIDRsAction{ + listenerUUID: listenerUUID, + allowedCIDRs: allowedCIDRs, + } +} + +func (a *UpdateListenerAllowedCIDRsAction) Label() string { + return fmt.Sprintf("update-cidrs(%s/%s)", + a.listenerUUID, strings.Join(a.allowedCIDRs, ",")) +} + +func (a *UpdateListenerAllowedCIDRsAction) Run( + ctx context.Context, client *cloudscale.Client) (Control, error) { + + err := client.LoadBalancerListeners.Update(ctx, + a.listenerUUID, + &cloudscale.LoadBalancerListenerRequest{ + AllowedCIDRs: a.allowedCIDRs, + }, + ) + + return ProceedOnSuccess(err) +} + +// UpdateListenerTimeoutAction updates a listener's timeout +type UpdateListenerTimeoutAction struct { + key string + listenerUUID string + timeout int +} + +func UpdateListenerTimeout( + listenerUUID string, timeout int, key string) Action { + + return &UpdateListenerTimeoutAction{ + listenerUUID: listenerUUID, + timeout: timeout, + key: key, + } +} + +func (a *UpdateListenerTimeoutAction) Label() string { + return fmt.Sprintf("update-listener-timeout-%s(%s: %dms)", + a.key, a.listenerUUID, a.timeout) +} + +func (a *UpdateListenerTimeoutAction) Run( + ctx context.Context, client *cloudscale.Client) (Control, error) { + + req := cloudscale.LoadBalancerListenerRequest{} + + switch a.key { + case "client-data-ms": + req.TimeoutClientDataMS = a.timeout + case "member-connect-ms": + req.TimeoutMemberConnectMS = a.timeout + case "member-data-ms": + req.TimeoutMemberDataMS = a.timeout + default: + return Errored, fmt.Errorf("unknown timeout key: %s", a.key) + + } + + return ProceedOnSuccess( + client.LoadBalancerListeners.Update(ctx, a.listenerUUID, &req)) +} + +// CreateHealthMonitorAction creates a health monitor +type CreateHealthMonitorAction struct { + poolUUID string + monitor *cloudscale.LoadBalancerHealthMonitor +} + +func 
CreateHealthMonitor( + poolUUID string, monitor *cloudscale.LoadBalancerHealthMonitor) Action { + + return &CreateHealthMonitorAction{poolUUID: poolUUID, monitor: monitor} +} + +func (a *CreateHealthMonitorAction) Label() string { + return fmt.Sprintf("create-monitor(%s/%s)", a.poolUUID, a.monitor.Type) +} + +func (a *CreateHealthMonitorAction) Run( + ctx context.Context, client *cloudscale.Client) (Control, error) { + + var http *cloudscale.LoadBalancerHealthMonitorHTTP + if a.monitor.HTTP != nil { + http = a.monitor.HTTP + } else { + http = &cloudscale.LoadBalancerHealthMonitorHTTP{} + } + + _, err := client.LoadBalancerHealthMonitors.Create(ctx, + &cloudscale.LoadBalancerHealthMonitorRequest{ + Pool: a.poolUUID, + DelayS: a.monitor.DelayS, + TimeoutS: a.monitor.TimeoutS, + UpThreshold: a.monitor.UpThreshold, + DownThreshold: a.monitor.DownThreshold, + Type: a.monitor.Type, + HTTP: &cloudscale.LoadBalancerHealthMonitorHTTPRequest{ + Method: http.Method, + UrlPath: http.UrlPath, + Version: http.Version, + Host: http.Host, + ExpectedCodes: http.ExpectedCodes, + }, + }, + ) + + return ProceedOnSuccess(err) +} + +// UpdateMonitorHTTPMethod updates a monitor's HTTP method +type UpdateMonitorHTTPMethodAction struct { + monitorUUID string + method string +} + +func UpdateMonitorHTTPMethod(monitorUUID string, method string) Action { + return &UpdateMonitorHTTPMethodAction{ + monitorUUID: monitorUUID, + method: method, + } +} + +func (a *UpdateMonitorHTTPMethodAction) Label() string { + return fmt.Sprintf( + "update-monitor-http-method (%s: %s)", a.monitorUUID, a.method) +} + +func (a *UpdateMonitorHTTPMethodAction) Run( + ctx context.Context, client *cloudscale.Client) (Control, error) { + + err := client.LoadBalancerHealthMonitors.Update(ctx, a.monitorUUID, + &cloudscale.LoadBalancerHealthMonitorRequest{ + HTTP: &cloudscale.LoadBalancerHealthMonitorHTTPRequest{ + Method: a.method, + }, + }, + ) + + return ProceedOnSuccess(err) +} + +// UpdateMonitorHTTPPath updates a 
monitor's HTTP path +type UpdateMonitorHTTPPathAction struct { + monitorUUID string + path string +} + +func UpdateMonitorHTTPPath(monitorUUID string, path string) Action { + return &UpdateMonitorHTTPPathAction{ + monitorUUID: monitorUUID, + path: path, + } +} + +func (a *UpdateMonitorHTTPPathAction) Label() string { + return fmt.Sprintf( + "update-monitor-http-path (%s: %s)", a.monitorUUID, a.path) +} + +func (a *UpdateMonitorHTTPPathAction) Run( + ctx context.Context, client *cloudscale.Client) (Control, error) { + + err := client.LoadBalancerHealthMonitors.Update(ctx, a.monitorUUID, + &cloudscale.LoadBalancerHealthMonitorRequest{ + HTTP: &cloudscale.LoadBalancerHealthMonitorHTTPRequest{ + UrlPath: a.path, + }, + }, + ) + + return ProceedOnSuccess(err) +} + +// UpdateMonitorHTTPHost updates a monitor's HTTP host +type UpdateMonitorHTTPHostAction struct { + monitorUUID string + host *string +} + +func UpdateMonitorHTTPHost(monitorUUID string, host *string) Action { + return &UpdateMonitorHTTPHostAction{ + monitorUUID: monitorUUID, + host: host, + } +} + +func (a *UpdateMonitorHTTPHostAction) Label() string { + return fmt.Sprintf( + "update-monitor-http-host (%s: %v)", a.monitorUUID, a.host) +} + +func (a *UpdateMonitorHTTPHostAction) Run( + ctx context.Context, client *cloudscale.Client) (Control, error) { + + err := client.LoadBalancerHealthMonitors.Update(ctx, a.monitorUUID, + &cloudscale.LoadBalancerHealthMonitorRequest{ + HTTP: &cloudscale.LoadBalancerHealthMonitorHTTPRequest{ + Host: a.host, + }, + }, + ) + + return ProceedOnSuccess(err) +} + +// UpdateMonitorHTTPExpectedCodes updates a monitor's HTTP expected codes +type UpdateMonitorHTTPExpectedCodesAction struct { + monitorUUID string + expectedCodes []string +} + +func UpdateMonitorHTTPExpectedCodes( + monitorUUID string, expectedCodes []string) Action { + + return &UpdateMonitorHTTPExpectedCodesAction{ + monitorUUID: monitorUUID, + expectedCodes: expectedCodes, + } +} + +func (a 
*UpdateMonitorHTTPExpectedCodesAction) Label() string { + return fmt.Sprintf( + "update-monitor-http-expected-codes (%s: %s)", + a.monitorUUID, + a.expectedCodes, + ) +} + +func (a *UpdateMonitorHTTPExpectedCodesAction) Run( + ctx context.Context, client *cloudscale.Client) (Control, error) { + + err := client.LoadBalancerHealthMonitors.Update(ctx, a.monitorUUID, + &cloudscale.LoadBalancerHealthMonitorRequest{ + HTTP: &cloudscale.LoadBalancerHealthMonitorHTTPRequest{ + ExpectedCodes: a.expectedCodes, + }, + }, + ) + + return ProceedOnSuccess(err) +} + +// UpdateMonitorNumberAction updates a monitor's numbers +type UpdateMonitorNumberAction struct { + monitorUUID string + number int + key string +} + +func UpdateMonitorNumber(monitorUUID string, number int, key string) Action { + return &UpdateMonitorNumberAction{ + key: key, + monitorUUID: monitorUUID, + number: number, + } +} + +func (a *UpdateMonitorNumberAction) Label() string { + return fmt.Sprintf("update-monitor-%s(%s: %d)", + a.key, a.monitorUUID, a.number) +} + +func (a *UpdateMonitorNumberAction) Run( + ctx context.Context, client *cloudscale.Client) (Control, error) { + + req := cloudscale.LoadBalancerHealthMonitorRequest{} + + switch a.key { + case "delay-s": + req.DelayS = a.number + case "timeout-s": + req.TimeoutS = a.number + case "up-threshold": + req.UpThreshold = a.number + case "down-threshold": + req.DownThreshold = a.number + default: + return Errored, fmt.Errorf("unknown timeout key: %s", a.key) + } + + return ProceedOnSuccess( + client.LoadBalancerHealthMonitors.Update(ctx, a.monitorUUID, &req)) +} diff --git a/pkg/internal/actions/actions_test.go b/pkg/internal/actions/actions_test.go new file mode 100644 index 0000000..d0608a8 --- /dev/null +++ b/pkg/internal/actions/actions_test.go @@ -0,0 +1,516 @@ +package actions + +import ( + "context" + "testing" + "time" + + "github.com/cloudscale-ch/cloudscale-cloud-controller-manager/pkg/internal/testkit" + 
"github.com/cloudscale-ch/cloudscale-go-sdk/v4" + "github.com/stretchr/testify/assert" +) + +func TestRefetch(t *testing.T) { + assert.NotEmpty(t, Refetch().Label()) + + v, err := Refetch().Run(context.Background(), nil) + assert.Equal(t, Refresh, v) + assert.NoError(t, err) +} + +func TestCreateLbAction(t *testing.T) { + server := testkit.NewMockAPIServer() + server.On("/v1/load-balancers", 201, "{}") + server.Start() + defer server.Close() + + action := CreateLb(&cloudscale.LoadBalancer{ + Name: "foo", + Flavor: cloudscale.LoadBalancerFlavorStub{ + Slug: "lb-standard", + }, + VIPAddresses: []cloudscale.VIPAddress{ + {Address: "10.0.0.1", Subnet: cloudscale.SubnetStub{ + CIDR: "10.0.0.1/24", + }}, + }, + }) + + assert.NotEmpty(t, action.Label()) + v, err := action.Run(context.Background(), server.Client()) + + assert.NoError(t, err) + assert.Equal(t, Proceed, v) + + var sent cloudscale.LoadBalancerRequest + server.LastSent(&sent) + + assert.Equal(t, "foo", sent.Name) + assert.Equal(t, "lb-standard", sent.Flavor) + assert.Equal(t, "10.0.0.1", (*sent.VIPAddresses)[0].Address) +} + +func TestRenameLbAction(t *testing.T) { + server := testkit.NewMockAPIServer() + server.On( + "/v1/load-balancers/00000000-0000-0000-0000-000000000000", 204, "") + + server.Start() + defer server.Close() + + action := RenameLb("00000000-0000-0000-0000-000000000000", "new-name") + + assert.NotEmpty(t, action.Label()) + v, err := action.Run(context.Background(), server.Client()) + + assert.NoError(t, err) + assert.Equal(t, Proceed, v) +} + +func TestAwaitLbAction(t *testing.T) { + lb := cloudscale.LoadBalancer{} + + action := AwaitLb(&lb) + assert.NotEmpty(t, action.Label()) + + lb.Status = "changing" + v, err := action.Run(context.Background(), nil) + assert.NoError(t, err) + assert.Equal(t, Refresh, v) + + lb.Status = "ready" + v, err = action.Run(context.Background(), nil) + assert.NoError(t, err) + assert.Equal(t, Proceed, v) +} + +func TestDeleteResourceAction(t *testing.T) { + server 
:= testkit.NewMockAPIServer() + server.On("/v1/foo", 204, "") + server.On("/v1/bar", 403, "") + server.Start() + defer server.Close() + + action := DeleteResource("/v1/foo") + assert.NotEmpty(t, action.Label()) + + v, err := action.Run(context.Background(), server.Client()) + assert.NoError(t, err) + assert.Equal(t, Proceed, v) + + action = DeleteResource("/v1/bar") + + v, err = action.Run(context.Background(), server.Client()) + assert.Error(t, err) + assert.Equal(t, Errored, v) +} + +func TestSleepAction(t *testing.T) { + action := Sleep(100 * time.Millisecond) + assert.NotEmpty(t, action.Label()) + + start := time.Now() + v, err := action.Run(context.Background(), nil) + + assert.Greater(t, time.Since(start), 100*time.Millisecond) + assert.NoError(t, err) + assert.Equal(t, Proceed, v) + + ctx, cancel := context.WithCancel(context.Background()) + cancel() + + start = time.Now() + v, err = action.Run(ctx, nil) + assert.Error(t, err) + assert.Equal(t, Errored, v) + assert.Greater(t, 1*time.Millisecond, time.Since(start)) +} + +func TestCreatePoolAction(t *testing.T) { + server := testkit.NewMockAPIServer() + server.On("/v1/load-balancers/pools", 201, "{}") + server.Start() + defer server.Close() + + action := CreatePool("00000000-0000-0000-0000-000000000000", + &cloudscale.LoadBalancerPool{ + Name: "Foo", + Algorithm: "round-robin", + Protocol: "tcp", + }, + ) + + assert.NotEmpty(t, action.Label()) + + v, err := action.Run(context.Background(), server.Client()) + assert.NoError(t, err) + assert.Equal(t, Proceed, v) + + var sent cloudscale.LoadBalancerPoolRequest + server.LastSent(&sent) + + assert.Equal(t, "Foo", sent.Name) + assert.Equal(t, "round-robin", sent.Algorithm) + assert.Equal(t, "tcp", sent.Protocol) + assert.Equal(t, "00000000-0000-0000-0000-000000000000", sent.LoadBalancer) +} + +func TestCreatePoolMemberAction(t *testing.T) { + server := testkit.NewMockAPIServer() + server.On( + "/v1/load-balancers/pools/00000000-0000-0000-0000-000000000000"+ + 
"/members", 201, "{}") + server.Start() + defer server.Close() + + action := CreatePoolMember("00000000-0000-0000-0000-000000000000", + &cloudscale.LoadBalancerPoolMember{ + ProtocolPort: 80, + MonitorPort: 8080, + Address: "10.0.0.1", + Subnet: cloudscale.SubnetStub{ + UUID: "11111111-1111-1111-1111-111111111111", + }, + }, + ) + + assert.NotEmpty(t, action.Label()) + + v, err := action.Run(context.Background(), server.Client()) + assert.NoError(t, err) + assert.Equal(t, Proceed, v) + + var sent cloudscale.LoadBalancerPoolMemberRequest + server.LastSent(&sent) + + assert.Equal(t, 80, sent.ProtocolPort) + assert.Equal(t, 8080, sent.MonitorPort) + assert.Equal(t, "10.0.0.1", sent.Address) + assert.Equal(t, "11111111-1111-1111-1111-111111111111", sent.Subnet) +} + +func TestCreateListenerAction(t *testing.T) { + server := testkit.NewMockAPIServer() + server.On("/v1/load-balancers/listeners", 201, "{}") + server.Start() + defer server.Close() + + action := CreateListener("00000000-0000-0000-0000-000000000000", + &cloudscale.LoadBalancerListener{ + Name: "Foo", + Protocol: "tcp", + ProtocolPort: 80, + AllowedCIDRs: []string{"10.0.0.0/24"}, + TimeoutClientDataMS: 1, + TimeoutMemberConnectMS: 2, + TimeoutMemberDataMS: 3, + }, + ) + + assert.NotEmpty(t, action.Label()) + + v, err := action.Run(context.Background(), server.Client()) + assert.NoError(t, err) + assert.Equal(t, Proceed, v) + + var sent cloudscale.LoadBalancerListenerRequest + server.LastSent(&sent) + + assert.Equal(t, "00000000-0000-0000-0000-000000000000", sent.Pool) + assert.Equal(t, "tcp", sent.Protocol) + assert.Equal(t, 80, sent.ProtocolPort) + assert.Equal(t, []string{"10.0.0.0/24"}, sent.AllowedCIDRs) + assert.Equal(t, 1, sent.TimeoutClientDataMS) + assert.Equal(t, 2, sent.TimeoutMemberConnectMS) + assert.Equal(t, 3, sent.TimeoutMemberDataMS) +} + +func TestUpdateListenerAllowedCIDRsAction(t *testing.T) { + server := testkit.NewMockAPIServer() + server.On( + 
"/v1/load-balancers/listeners/00000000-0000-0000-0000-000000000000", + 204, "") + server.Start() + defer server.Close() + + action := UpdateListenerAllowedCIDRs( + "00000000-0000-0000-0000-000000000000", []string{"10.0.0.0/24"}) + + assert.NotEmpty(t, action.Label()) + + v, err := action.Run(context.Background(), server.Client()) + assert.NoError(t, err) + assert.Equal(t, Proceed, v) + + var sent cloudscale.LoadBalancerListenerRequest + server.LastSent(&sent) + + assert.Equal(t, []string{"10.0.0.0/24"}, sent.AllowedCIDRs) +} + +func TestUpdateListenerTimeoutAction(t *testing.T) { + server := testkit.NewMockAPIServer() + server.On( + "/v1/load-balancers/listeners/00000000-0000-0000-0000-000000000000", + 204, "") + server.Start() + defer server.Close() + + // TimeoutClientDataMS + action := UpdateListenerTimeout( + "00000000-0000-0000-0000-000000000000", + 10, + "client-data-ms", + ) + assert.NotEmpty(t, action.Label()) + + v, err := action.Run(context.Background(), server.Client()) + assert.NoError(t, err) + assert.Equal(t, Proceed, v) + + var sent cloudscale.LoadBalancerListenerRequest + server.LastSent(&sent) + assert.Equal(t, 10, sent.TimeoutClientDataMS) + + // TimeoutMemberConnectMS + action = UpdateListenerTimeout( + "00000000-0000-0000-0000-000000000000", + 20, + "member-connect-ms", + ) + + _, _ = action.Run(context.Background(), server.Client()) + server.LastSent(&sent) + assert.Equal(t, 20, sent.TimeoutMemberConnectMS) + + // TimeoutMemberDataMS + action = UpdateListenerTimeout( + "00000000-0000-0000-0000-000000000000", + 30, + "member-data-ms", + ) + + _, _ = action.Run(context.Background(), server.Client()) + server.LastSent(&sent) + assert.Equal(t, 30, sent.TimeoutMemberDataMS) + + // Something unknown + action = UpdateListenerTimeout( + "00000000-0000-0000-0000-000000000000", + 30, + "foo", + ) + + v, err = action.Run(context.Background(), server.Client()) + assert.Error(t, err) + assert.Equal(t, Errored, v) +} + +func TestCreateHealthMonitorAction(t 
*testing.T) { + server := testkit.NewMockAPIServer() + server.On("/v1/load-balancers/health-monitors", 201, "{}") + server.Start() + defer server.Close() + + host := "foo" + action := CreateHealthMonitor("00000000-0000-0000-0000-000000000000", + &cloudscale.LoadBalancerHealthMonitor{ + DelayS: 1, + TimeoutS: 2, + UpThreshold: 3, + DownThreshold: 4, + Type: "https", + HTTP: &cloudscale.LoadBalancerHealthMonitorHTTP{ + ExpectedCodes: []string{"200"}, + Method: "GET", + UrlPath: "/livez", + Version: "1.1", + Host: &host, + }, + }, + ) + + assert.NotEmpty(t, action.Label()) + + v, err := action.Run(context.Background(), server.Client()) + assert.NoError(t, err) + assert.Equal(t, Proceed, v) + + var sent cloudscale.LoadBalancerHealthMonitorRequest + server.LastSent(&sent) + + assert.Equal(t, "00000000-0000-0000-0000-000000000000", sent.Pool) + assert.Equal(t, 1, sent.DelayS) + assert.Equal(t, 2, sent.TimeoutS) + assert.Equal(t, 3, sent.UpThreshold) + assert.Equal(t, 4, sent.DownThreshold) + assert.Equal(t, "https", sent.Type) + assert.Equal(t, []string{"200"}, sent.HTTP.ExpectedCodes) + assert.Equal(t, "GET", sent.HTTP.Method) + assert.Equal(t, "/livez", sent.HTTP.UrlPath) + assert.Equal(t, "1.1", sent.HTTP.Version) + assert.Equal(t, "foo", *sent.HTTP.Host) +} + +func TestUpdateMonitorHTTPMethod(t *testing.T) { + server := testkit.NewMockAPIServer() + server.On( + "/v1/load-balancers/health-monitors"+ + "/00000000-0000-0000-0000-000000000000", 204, "") + server.Start() + defer server.Close() + + action := UpdateMonitorHTTPMethod( + "00000000-0000-0000-0000-000000000000", "HEAD") + + assert.NotEmpty(t, action.Label()) + + v, err := action.Run(context.Background(), server.Client()) + assert.NoError(t, err) + assert.Equal(t, Proceed, v) + + var sent cloudscale.LoadBalancerHealthMonitorRequest + server.LastSent(&sent) + + assert.Equal(t, "HEAD", sent.HTTP.Method) +} + +func TestUpdateMonitorHTTPHost(t *testing.T) { + server := testkit.NewMockAPIServer() + server.On( + 
"/v1/load-balancers/health-monitors"+ + "/00000000-0000-0000-0000-000000000000", 204, "") + server.Start() + defer server.Close() + + host := "Foo" + action := UpdateMonitorHTTPHost( + "00000000-0000-0000-0000-000000000000", &host) + + assert.NotEmpty(t, action.Label()) + + v, err := action.Run(context.Background(), server.Client()) + assert.NoError(t, err) + assert.Equal(t, Proceed, v) + + var sent cloudscale.LoadBalancerHealthMonitorRequest + server.LastSent(&sent) + + assert.Equal(t, "Foo", *sent.HTTP.Host) +} + +func TestUpdateMonitorHTTPPath(t *testing.T) { + server := testkit.NewMockAPIServer() + server.On( + "/v1/load-balancers/health-monitors"+ + "/00000000-0000-0000-0000-000000000000", 204, "") + server.Start() + defer server.Close() + + action := UpdateMonitorHTTPPath( + "00000000-0000-0000-0000-000000000000", "/foo") + + assert.NotEmpty(t, action.Label()) + + v, err := action.Run(context.Background(), server.Client()) + assert.NoError(t, err) + assert.Equal(t, Proceed, v) + + var sent cloudscale.LoadBalancerHealthMonitorRequest + server.LastSent(&sent) + + assert.Equal(t, "/foo", sent.HTTP.UrlPath) +} + +func TestUpdateMonitorHTTPExpectedCodes(t *testing.T) { + server := testkit.NewMockAPIServer() + server.On( + "/v1/load-balancers/health-monitors"+ + "/00000000-0000-0000-0000-000000000000", 204, "") + server.Start() + defer server.Close() + + action := UpdateMonitorHTTPExpectedCodes( + "00000000-0000-0000-0000-000000000000", []string{"202"}) + + assert.NotEmpty(t, action.Label()) + + v, err := action.Run(context.Background(), server.Client()) + assert.NoError(t, err) + assert.Equal(t, Proceed, v) + + var sent cloudscale.LoadBalancerHealthMonitorRequest + server.LastSent(&sent) + + assert.Equal(t, []string{"202"}, sent.HTTP.ExpectedCodes) +} + +func TestUpdateMonitorNumberAction(t *testing.T) { + server := testkit.NewMockAPIServer() + server.On( + "/v1/load-balancers/health-monitors"+ + "/00000000-0000-0000-0000-000000000000", 204, "") + server.Start() + 
defer server.Close() + + // DelayS + action := UpdateMonitorNumber( + "00000000-0000-0000-0000-000000000000", + 1, + "delay-s", + ) + assert.NotEmpty(t, action.Label()) + + v, err := action.Run(context.Background(), server.Client()) + assert.NoError(t, err) + assert.Equal(t, Proceed, v) + + var sent cloudscale.LoadBalancerHealthMonitorRequest + server.LastSent(&sent) + + assert.Equal(t, 1, sent.DelayS) + + // TimeoutS + action = UpdateMonitorNumber( + "00000000-0000-0000-0000-000000000000", + 1, + "timeout-s", + ) + + _, _ = action.Run(context.Background(), server.Client()) + server.LastSent(&sent) + assert.Equal(t, 1, sent.TimeoutS) + + // UpThreshold + action = UpdateMonitorNumber( + "00000000-0000-0000-0000-000000000000", + 1, + "up-threshold", + ) + + _, _ = action.Run(context.Background(), server.Client()) + server.LastSent(&sent) + assert.Equal(t, 1, sent.UpThreshold) + + // DownThreshold + action = UpdateMonitorNumber( + "00000000-0000-0000-0000-000000000000", + 1, + "down-threshold", + ) + + _, _ = action.Run(context.Background(), server.Client()) + server.LastSent(&sent) + assert.Equal(t, 1, sent.DownThreshold) + + // Something unknown + action = UpdateMonitorNumber( + "00000000-0000-0000-0000-000000000000", + 1, + "foo", + ) + + v, err = action.Run(context.Background(), server.Client()) + assert.Error(t, err) + assert.Equal(t, Errored, v) +} diff --git a/pkg/internal/actions/control.go b/pkg/internal/actions/control.go new file mode 100644 index 0000000..aa8efbc --- /dev/null +++ b/pkg/internal/actions/control.go @@ -0,0 +1,25 @@ +package actions + +type Control int + +const ( + Errored Control = 0 + Proceed Control = 10 + Refresh Control = 20 +) + +func ProceedOnSuccess(err error) (Control, error) { + if err != nil { + return Errored, err + } + + return Proceed, nil +} + +func RefreshOnSuccess(err error) (Control, error) { + if err != nil { + return Errored, err + } + + return Refresh, nil +} diff --git a/pkg/internal/compare/compare.go 
b/pkg/internal/compare/compare.go new file mode 100644 index 0000000..3821fe4 --- /dev/null +++ b/pkg/internal/compare/compare.go @@ -0,0 +1,65 @@ +package compare + +// Diff returns a list of items to delete, and a list of items to create, by +// comparing a desired list to an actual list. To compare, a function that +// takes a single item, and returns a string key is used. +// +// The string key is used for a map, to avoid having to compare all desired +// items to all actual items. It also is easier to write a key function, than +// it is to write a comparison function. +func Diff[T any]( + desired []T, + actual []T, + key func(item T) string, +) (delete []T, create []T) { + + d := make(map[string]T) + a := make(map[string]T) + + for _, i := range desired { + d[key(i)] = i + } + + for _, i := range actual { + a[key(i)] = i + } + + for k, i := range d { + _, ok := a[k] + + if !ok { + create = append(create, i) + } + } + + for k, i := range a { + _, ok := d[k] + + if !ok { + delete = append(delete, i) + } + } + + return delete, create +} + +// Match returns a list of items that match (each item in the list is a +// tuple of matching items). 
+func Match[T any](as []T, bs []T, key func(item T) string) [][]T { + keys := make(map[string]T) + matches := make([][]T, 0) + + for _, b := range bs { + keys[key(b)] = b + } + + for _, a := range as { + v, ok := keys[key(a)] + + if ok { + matches = append(matches, []T{a, v}) + } + } + + return matches +} diff --git a/pkg/internal/compare/compare_test.go b/pkg/internal/compare/compare_test.go new file mode 100644 index 0000000..b389640 --- /dev/null +++ b/pkg/internal/compare/compare_test.go @@ -0,0 +1,65 @@ +package compare + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestDiff(t *testing.T) { + + type Package struct { + Name string + Version string + } + + desired := []Package{ + {Name: "Winamp", Version: "2.9"}, + {Name: "Firefox", Version: "Nightly"}, + } + actual := []Package{ + {Name: "Winamp", Version: "2.8"}, + {Name: "Firefox", Version: "Nightly"}, + } + + del, add := Diff(desired, actual, func(p Package) string { + return fmt.Sprintf( + "%s-%s", + p.Name, + p.Version, + ) + }) + + assert.Equal(t, []Package{{Name: "Winamp", Version: "2.8"}}, del) + assert.Equal(t, []Package{{Name: "Winamp", Version: "2.9"}}, add) +} + +func TestMatch(t *testing.T) { + + type Package struct { + Name string + Version string + } + + old := []Package{ + {Name: "Winamp", Version: "2.8"}, + } + new := []Package{ + {Name: "Winamp", Version: "2.9"}, + } + + matches := Match(old, new, func(p Package) string { + return fmt.Sprintf( + "%s", + p.Name, + ) + }) + + assert.Equal(t, [][]Package{ + { + {Name: "Winamp", Version: "2.8"}, + {Name: "Winamp", Version: "2.9"}, + }, + }, matches) +} diff --git a/pkg/internal/integration/main_test.go b/pkg/internal/integration/main_test.go index d95dece..c1f9534 100644 --- a/pkg/internal/integration/main_test.go +++ b/pkg/internal/integration/main_test.go @@ -6,18 +6,15 @@ import ( "context" "fmt" "log" + "math/rand" "os" - "strings" "testing" "time" - "github.com/cloudscale-ch/cloudscale-go-sdk" - 
"github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + cloudscale "github.com/cloudscale-ch/cloudscale-go-sdk/v4" "github.com/stretchr/testify/suite" "golang.org/x/oauth2" v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/clientcmd" @@ -28,7 +25,7 @@ func TestMain(m *testing.M) { os.Exit(exitStatus) } -func TestIntegrationTestSuite(t *testing.T) { +func TestIntegration(t *testing.T) { suite.Run(t, new(IntegrationTestSuite)) } @@ -36,6 +33,7 @@ type IntegrationTestSuite struct { suite.Suite k8s kubernetes.Interface api *cloudscale.Client + ns string } func (s *IntegrationTestSuite) SetupSuite() { @@ -77,202 +75,34 @@ func (s *IntegrationTestSuite) SetupSuite() { s.api = cloudscale.NewClient(httpClient) } -func (s *IntegrationTestSuite) Nodes() []v1.Node { - nodes, err := s.k8s.CoreV1().Nodes().List( - context.Background(), - metav1.ListOptions{}, - ) - - assert.NoError(s.T(), err) - return nodes.Items -} - -func (s *IntegrationTestSuite) NodeNamed(name string) *v1.Node { - node, err := s.k8s.CoreV1().Nodes().Get( - context.Background(), name, metav1.GetOptions{}, - ) - - if err != nil && errors.IsNotFound(err) { - return nil - } - - assert.NoError(s.T(), err) - return node -} +func (s *IntegrationTestSuite) BeforeTest(suite string, test string) { + s.ns = fmt.Sprintf("cloudscale-test-%08x", rand.Uint32()) -func (s *IntegrationTestSuite) NodesLabeled(selector string) []v1.Node { - nodes, err := s.k8s.CoreV1().Nodes().List( + _, err := s.k8s.CoreV1().Namespaces().Create( context.Background(), - metav1.ListOptions{ - LabelSelector: selector, + &v1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: s.ns, + }, }, + metav1.CreateOptions{}, ) - assert.NoError(s.T(), err) - return nodes.Items -} - -func (s *IntegrationTestSuite) NodesFiltered(fn func(*v1.Node) bool) []v1.Node { - nodes := s.Nodes() - matches := make([]v1.Node, 0, 
len(nodes)) - - for _, n := range nodes { - if fn(&n) { - matches = append(matches, n) - } + if err != nil { + panic(fmt.Sprintf("could not create namespace %s: %s", s.ns, err)) } - - return matches } -func (s *IntegrationTestSuite) Servers() []cloudscale.Server { - servers, err := s.api.Servers.List( +func (s *IntegrationTestSuite) AfterTest(suite string, test string) { + err := s.k8s.CoreV1().Namespaces().Delete( context.Background(), - cloudscale.WithTagFilter( - cloudscale.TagMap{ - "source": "k8test", - }, - ), + s.ns, + metav1.DeleteOptions{}, ) - assert.NoError(s.T(), err, "could not list servers") - return servers -} - -func (s *IntegrationTestSuite) ServerNamed(name string) *cloudscale.Server { - for _, server := range s.Servers() { - if server.Name == name { - return &server - } - } - - return nil -} - -func (s *IntegrationTestSuite) TestKubernetesReady() { - - // Make sure we have at least one control, and some workers - controls := s.NodesLabeled("node-role.kubernetes.io/control-plane") - assert.True(s.T(), len(controls) > 0, "no controls found") - - nodes := s.Nodes() - assert.True(s.T(), len(nodes) > len(controls), "no nodes found") -} - -func (s *IntegrationTestSuite) TestNodesInitialized() { - - // None of the nodes should be uninitailized (this taint is removed, once - // the CCM has responded). 
- nodes := s.NodesFiltered(func(n *v1.Node) bool { - for _, t := range n.Spec.Taints { - if t.Key == "node.cloudprovider.kubernetes.io/uninitialized" { - return true - } - } - return false - }) - assert.True(s.T(), len(nodes) == 0, "found uninitialized nodes") - -} - -func (s *IntegrationTestSuite) TestNodeMetadata() { - assertMetadata := func(server cloudscale.Server) { - node := s.NodeNamed(server.Name) - - assert.NotNil(s.T(), server, "server name not found:", server.Name) - assert.NotNil(s.T(), node, "node name not found:", server.Name) - - assert.Equal(s.T(), - fmt.Sprintf("cloudscale://%s", server.UUID), - string(node.Spec.ProviderID), - "node has wrong provider id: %s", node.Name) - - assert.Equal(s.T(), - server.Flavor.Slug, - node.Labels["node.kubernetes.io/instance-type"], - "node has wrong flavor: %s", node.Name) - - assert.Equal(s.T(), - strings.Trim(server.Zone.Slug, "0123456789"), - node.Labels["topology.kubernetes.io/region"], - "node has wrong region: %s", node.Name) - - assert.Equal(s.T(), - server.Zone.Slug, - node.Labels["topology.kubernetes.io/zone"], - "node has wrong zone: %s", node.Name) - - assert.Equal(s.T(), - node.Status.Addresses[0], - v1.NodeAddress{ - Type: v1.NodeHostName, - Address: server.Name, - }, - "node has wrong hostname node-address: %s", node.Name) - - assert.Equal(s.T(), - node.Status.Addresses[1], - v1.NodeAddress{ - Type: v1.NodeExternalIP, - Address: server.Interfaces[0].Addresses[0].Address, - }, - "node has wrong public ipv4 node-address: %s", node.Name) - - assert.Equal(s.T(), - node.Status.Addresses[2], - v1.NodeAddress{ - Type: v1.NodeExternalIP, - Address: server.Interfaces[0].Addresses[1].Address, - }, - "node has wrong public ipv6 node-address: %s", node.Name) - } - - for _, server := range s.Servers() { - assertMetadata(server) - } -} - -func (s *IntegrationTestSuite) TestRestartServer() { - shutdownNodes := func() []v1.Node { - return s.NodesFiltered(func(n *v1.Node) bool { - for _, t := range n.Spec.Taints { - 
if t.Key == "node.cloudprovider.kubernetes.io/shutdown" { - return true - } - } - return false - }) - } - require.Len(s.T(), shutdownNodes(), 0, "no nodes may be shutdown yet") - - // Shutdown the server - server := s.ServerNamed("k8test-worker-1") - err := s.api.Servers.Stop(context.Background(), server.UUID) - assert.NoError(s.T(), err, "could not stop server %s", server.Name) - - // Wait for that to propagate (this includes some time to wait for the - // server to actually shutdown) - start := time.Now() - for time.Since(start) < (120 * time.Second) { - if len(shutdownNodes()) == 1 { - break - } - time.Sleep(1 * time.Second) - } - - assert.Len(s.T(), shutdownNodes(), 1, "no shutdown node found") - - // Start the server - err = s.api.Servers.Start(context.Background(), server.UUID) - assert.NoError(s.T(), err, "could not start server %s", server.Name) - - start = time.Now() - for time.Since(start) < (120 * time.Second) { - if len(shutdownNodes()) == 0 { - break - } - time.Sleep(1 * time.Second) + if err != nil { + panic(fmt.Sprintf("could not delete namespace %s: %s", s.ns, err)) } - assert.Len(s.T(), shutdownNodes(), 0, "node not detected as started") + s.ns = "" } diff --git a/pkg/internal/integration/node_test.go b/pkg/internal/integration/node_test.go new file mode 100644 index 0000000..10a4884 --- /dev/null +++ b/pkg/internal/integration/node_test.go @@ -0,0 +1,217 @@ +//go:build integration + +package integration + +import ( + "context" + "fmt" + "strings" + "time" + + cloudscale "github.com/cloudscale-ch/cloudscale-go-sdk/v4" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func (s *IntegrationTestSuite) Nodes() []v1.Node { + nodes, err := s.k8s.CoreV1().Nodes().List( + context.Background(), + metav1.ListOptions{}, + ) + + assert.NoError(s.T(), err) + return nodes.Items +} + +func (s *IntegrationTestSuite) 
NodeNamed(name string) *v1.Node { + node, err := s.k8s.CoreV1().Nodes().Get( + context.Background(), name, metav1.GetOptions{}, + ) + + if err != nil && errors.IsNotFound(err) { + return nil + } + + assert.NoError(s.T(), err) + return node +} + +func (s *IntegrationTestSuite) NodesLabeled(selector string) []v1.Node { + nodes, err := s.k8s.CoreV1().Nodes().List( + context.Background(), + metav1.ListOptions{ + LabelSelector: selector, + }, + ) + + assert.NoError(s.T(), err) + return nodes.Items +} + +func (s *IntegrationTestSuite) NodesFiltered(fn func(*v1.Node) bool) []v1.Node { + nodes := s.Nodes() + matches := make([]v1.Node, 0, len(nodes)) + + for _, n := range nodes { + if fn(&n) { + matches = append(matches, n) + } + } + + return matches +} + +func (s *IntegrationTestSuite) Servers() []cloudscale.Server { + servers, err := s.api.Servers.List( + context.Background(), + cloudscale.WithTagFilter( + cloudscale.TagMap{ + "source": "k8test", + }, + ), + ) + assert.NoError(s.T(), err, "could not list servers") + return servers +} + +func (s *IntegrationTestSuite) ServerNamed(name string) *cloudscale.Server { + for _, server := range s.Servers() { + if server.Name == name { + return &server + } + } + + return nil +} + +func (s *IntegrationTestSuite) TestNodesReady() { + + // Make sure we have at least one control, and some workers + controls := s.NodesLabeled("node-role.kubernetes.io/control-plane") + assert.True(s.T(), len(controls) > 0, "no controls found") + + nodes := s.Nodes() + assert.True(s.T(), len(nodes) > len(controls), "no nodes found") +} + +func (s *IntegrationTestSuite) TestNodesInitialized() { + + // None of the nodes should be uninitialized (this taint is removed, once + // the CCM has responded). 
+ nodes := s.NodesFiltered(func(n *v1.Node) bool { + for _, t := range n.Spec.Taints { + if t.Key == "node.cloudprovider.kubernetes.io/uninitialized" { + return true + } + } + return false + }) + assert.True(s.T(), len(nodes) == 0, "found uninitialized nodes") + +} + +func (s *IntegrationTestSuite) TestNodeMetadata() { + assertMetadata := func(server cloudscale.Server) { + node := s.NodeNamed(server.Name) + + assert.NotNil(s.T(), server, "server name not found:", server.Name) + assert.NotNil(s.T(), node, "node name not found:", server.Name) + + assert.Equal(s.T(), + fmt.Sprintf("cloudscale://%s", server.UUID), + string(node.Spec.ProviderID), + "node has wrong provider id: %s", node.Name) + + assert.Equal(s.T(), + server.Flavor.Slug, + node.Labels["node.kubernetes.io/instance-type"], + "node has wrong flavor: %s", node.Name) + + assert.Equal(s.T(), + strings.Trim(server.Zone.Slug, "0123456789"), + node.Labels["topology.kubernetes.io/region"], + "node has wrong region: %s", node.Name) + + assert.Equal(s.T(), + server.Zone.Slug, + node.Labels["topology.kubernetes.io/zone"], + "node has wrong zone: %s", node.Name) + + assert.Equal(s.T(), + node.Status.Addresses[0], + v1.NodeAddress{ + Type: v1.NodeHostName, + Address: server.Name, + }, + "node has wrong hostname node-address: %s", node.Name) + + assert.Equal(s.T(), + node.Status.Addresses[1], + v1.NodeAddress{ + Type: v1.NodeExternalIP, + Address: server.Interfaces[0].Addresses[0].Address, + }, + "node has wrong public ipv4 node-address: %s", node.Name) + + assert.Equal(s.T(), + node.Status.Addresses[2], + v1.NodeAddress{ + Type: v1.NodeExternalIP, + Address: server.Interfaces[0].Addresses[1].Address, + }, + "node has wrong public ipv6 node-address: %s", node.Name) + } + + for _, server := range s.Servers() { + assertMetadata(server) + } +} + +func (s *IntegrationTestSuite) TestNodeRestartServer() { + shutdownNodes := func() []v1.Node { + return s.NodesFiltered(func(n *v1.Node) bool { + for _, t := range n.Spec.Taints 
{ + if t.Key == "node.cloudprovider.kubernetes.io/shutdown" { + return true + } + } + return false + }) + } + + require.Len(s.T(), shutdownNodes(), 0, "no nodes may be shutdown yet") + + // Shutdown the server + server := s.ServerNamed("k8test-worker-1") + err := s.api.Servers.Stop(context.Background(), server.UUID) + assert.NoError(s.T(), err, "could not stop server %s", server.Name) + + // Wait for that to propagate (this includes some time to wait for the + // server to actually shutdown) + start := time.Now() + for time.Since(start) < (120 * time.Second) { + if len(shutdownNodes()) == 1 { + break + } + time.Sleep(1 * time.Second) + } + + assert.Len(s.T(), shutdownNodes(), 1, "no shutdown node found") + + // Start the server + err = s.api.Servers.Start(context.Background(), server.UUID) + assert.NoError(s.T(), err, "could not start server %s", server.Name) + + start = time.Now() + for time.Since(start) < (120 * time.Second) { + if len(shutdownNodes()) == 0 { + break + } + time.Sleep(1 * time.Second) + } + + assert.Len(s.T(), shutdownNodes(), 0, "node not detected as started") +} diff --git a/pkg/internal/integration/service_test.go b/pkg/internal/integration/service_test.go new file mode 100644 index 0000000..a4e0b90 --- /dev/null +++ b/pkg/internal/integration/service_test.go @@ -0,0 +1,328 @@ +//go:build integration + +package integration + +import ( + "context" + "io" + "net/netip" + "strings" + "time" + + "github.com/cloudscale-ch/cloudscale-cloud-controller-manager/pkg/cloudscale_ccm" + "github.com/cloudscale-ch/cloudscale-cloud-controller-manager/pkg/internal/kubeutil" + "github.com/cloudscale-ch/cloudscale-cloud-controller-manager/pkg/internal/testkit" + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +func (s *IntegrationTestSuite) CreateDeployment( + name string, image string, replicas int32, port int32, args 
[]string) { + + spec := appsv1.DeploymentSpec{ + Replicas: &replicas, + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": name, + }, + }, + Template: v1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app": name, + }, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: name, + Image: image, + Args: args, + Ports: []v1.ContainerPort{ + {ContainerPort: port}, + }, + }, + }, + }, + }, + } + + _, err := s.k8s.AppsV1().Deployments(s.ns).Create( + context.Background(), + &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: name}, + Spec: spec, + }, + metav1.CreateOptions{}, + ) + + s.Require().NoError(err) +} + +func (s *IntegrationTestSuite) ExposeDeployment( + name string, port int32, targetPort int32) { + + spec := v1.ServiceSpec{ + Type: v1.ServiceTypeLoadBalancer, + Selector: map[string]string{ + "app": name, + }, + Ports: []v1.ServicePort{ + { + Protocol: v1.ProtocolTCP, + Port: port, + TargetPort: intstr.FromInt32(targetPort), + }, + }, + } + + _, err := s.k8s.CoreV1().Services(s.ns).Create( + context.Background(), + &v1.Service{ + ObjectMeta: metav1.ObjectMeta{Name: name}, + Spec: spec, + }, + metav1.CreateOptions{}, + ) + + s.Require().NoError(err) +} + +// CCMLogs returns all the logs of the CCM since the given time. +func (s *IntegrationTestSuite) CCMLogs(start time.Time) string { + + pods, err := s.k8s.CoreV1().Pods("kube-system").List( + context.Background(), + metav1.ListOptions{ + LabelSelector: "k8s-app=cloudscale-cloud-controller-manager", + }, + ) + s.Require().NoError(err) + + st := metav1.NewTime(start) + options := v1.PodLogOptions{ + SinceTime: &st, + } + + output := "" + for _, pod := range pods.Items { + logs := s.k8s.CoreV1(). + Pods("kube-system"). 
+ GetLogs(pod.Name, &options) + + stream, err := logs.Stream(context.Background()) + s.Require().NoError(err) + defer stream.Close() + + bytes, err := io.ReadAll(stream) + s.Require().NoError(err) + + output += string(bytes) + } + + return output +} + +func (s *IntegrationTestSuite) ServiceNamed(name string) *v1.Service { + service, err := s.k8s.CoreV1().Services(s.ns).Get( + context.Background(), name, metav1.GetOptions{}, + ) + + if err != nil && errors.IsNotFound(err) { + return nil + } + + s.Require().NoError(err) + return service +} + +func (s *IntegrationTestSuite) AwaitServiceReady( + name string, timeout time.Duration) *v1.Service { + + var service *v1.Service + start := time.Now() + + for time.Since(start) < timeout { + service = s.ServiceNamed(name) + s.Require().NotNil(service) + + if service.Annotations != nil { + return service + } + time.Sleep(1 * time.Second) + } + + return nil +} + +func (s *IntegrationTestSuite) TestServiceEndToEnd() { + + // Note the start for the log + start := time.Now() + + // Deploy a TCP server that returns the hostname + s.T().Log("Creating hostname deployment") + s.CreateDeployment("hostname", "alpine/socat", 2, 8080, []string{ + `TCP-LISTEN:8080,fork`, + `SYSTEM:'echo $HOSTNAME'`, + }) + + // Expose the deployment using a LoadBalancer service + s.ExposeDeployment("hostname", 80, 8080) + + // Wait for the service to be ready + s.T().Log("Waiting for hostname service to be ready") + service := s.AwaitServiceReady("hostname", 180*time.Second) + s.Require().NotNil(service) + + // Ensure the annotations are set + s.Assert().NotEmpty( + service.Annotations[cloudscale_ccm.LoadBalancerUUID]) + s.Assert().NotEmpty( + service.Annotations[cloudscale_ccm.LoadBalancerConfigVersion]) + s.Assert().NotEmpty( + service.Annotations[cloudscale_ccm.LoadBalancerZone]) + + // Ensure we have two public IP addresses + s.Require().Len(service.Status.LoadBalancer.Ingress, 2) + addr := service.Status.LoadBalancer.Ingress[0].IP + + // Ensure that we 
get responses from two different pods (round-robin) + s.T().Log("Verifying hostname service responses") + responses := make(map[string]int) + for i := 0; i < 100; i++ { + output, err := testkit.TCPRead(addr, 80) + s.Assert().NoError(err) + + if output != "" { + responses[output]++ + } + + time.Sleep(50 * time.Millisecond) + } + + s.Assert().Len(responses, 2) + + // In this simple case we expect no errors nor warnings + s.T().Log("Checking log output for errors/warnings") + lines := s.CCMLogs(start) + + s.Assert().NotContains(lines, "error") + s.Assert().NotContains(lines, "Error") + s.Assert().NotContains(lines, "warn") + s.Assert().NotContains(lines, "Warn") +} + +func (s *IntegrationTestSuite) TestServiceTrafficPolicyLocal() { + + // Traffic received via default "Cluster" policy is snatted via node. + cluster_policy_prefix := netip.MustParsePrefix("10.0.0.0/16") + + // Traffic received via "Local" policy has no natting. The address is + // going to be private network address of the load balancer. + local_policy_prefix := netip.MustParsePrefix("10.100.10.0/24") + + // Deploy a TCP server that returns the remote IP address. Only use a + // single instance as we want to check that the routing works right with + // all policies. + s.T().Log("Creating peeraddr deployment") + s.CreateDeployment("peeraddr", "alpine/socat", 1, 8080, []string{ + `TCP-LISTEN:8080,fork`, + `SYSTEM:'echo $SOCAT_PEERADDR'`, + }) + + // Waits until the request is received through the given prefix and + // ten responses with the expected address come back. 
+ assertPrefix := func(addr string, prefix *netip.Prefix) { + successful := 0 + + for i := 0; i < 45; i++ { + time.Sleep(1 * time.Second) + + peer, err := testkit.TCPRead(addr, 80) + if err != nil { + continue + } + + if strings.Trim(peer, "\n") == "" { + continue + } + + peerIP := netip.MustParseAddr(strings.Trim(peer, "\n")) + if !prefix.Contains(peerIP) { + continue + } + + successful++ + + if successful >= 15 { + break + } + } + + s.Assert().GreaterOrEqual(successful, 15) + } + + // Ensures the traffic is handled without unexpected delay + assertFastResponses := func(addr string, prefix *netip.Prefix) { + for i := 0; i < 60; i++ { + before := time.Now() + _, err := testkit.TCPRead(addr, 80) + after := time.Now() + + // Bad requests take around 5s as they hit a timeout + s.Assert().WithinDuration(before, after, 1000*time.Millisecond) + s.Assert().NoError(err) + } + } + + // Expose the deployment using a LoadBalancer service + s.ExposeDeployment("peeraddr", 80, 8080) + + // Wait for the service to be ready + s.T().Log("Waiting for peeraddr service to be ready") + service := s.AwaitServiceReady("peeraddr", 180*time.Second) + s.Require().NotNil(service) + + // In its initial state, expect a natted IP address + addr := service.Status.LoadBalancer.Ingress[0].IP + + assertPrefix(addr, &cluster_policy_prefix) + assertFastResponses(addr, &cluster_policy_prefix) + + // Configure the service to use the local traffic policy + s.T().Log("Switching peeraddr service to 'Local' traffic policy") + err := kubeutil.PatchServiceExternalTrafficPolicy( + context.Background(), + s.k8s, + service, + v1.ServiceExternalTrafficPolicyTypeLocal, + ) + s.Require().NoError(err) + + service = s.AwaitServiceReady("peeraddr", 1*time.Second) + s.Require().NotNil(service) + + // Now expect to see an IP address from the node's private network + assertPrefix(addr, &local_policy_prefix) + assertFastResponses(addr, &local_policy_prefix) + + // Go back to the Cluster policy + s.T().Log("Switching 
peeraddr service back to 'Cluster' traffic policy") + err = kubeutil.PatchServiceExternalTrafficPolicy( + context.Background(), + s.k8s, + service, + v1.ServiceExternalTrafficPolicyTypeCluster, + ) + s.Require().NoError(err) + + service = s.AwaitServiceReady("peeraddr", 1*time.Second) + s.Require().NotNil(service) + + assertPrefix(addr, &cluster_policy_prefix) + assertFastResponses(addr, &cluster_policy_prefix) +} diff --git a/pkg/internal/kubeutil/annotate.go b/pkg/internal/kubeutil/annotate.go new file mode 100644 index 0000000..3e9b8f7 --- /dev/null +++ b/pkg/internal/kubeutil/annotate.go @@ -0,0 +1,131 @@ +package kubeutil + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "strings" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes" +) + +// AnnotateService takes a list of key/value pairs and applies them as +// annotations using JSON patch (https://jsonpatch.com/). +func AnnotateService( + ctx context.Context, + client kubernetes.Interface, + service *v1.Service, + kv ...string, +) error { + if len(kv) == 0 { + return nil + } + + if len(kv)%2 != 0 { + return errors.New("expected an even number of arguments (key, value)") + } + + if client == nil { + return errors.New("no valid kubernetes client given") + } + + operations := make([]map[string]any, 0, len(kv)/2) + + if service.Annotations == nil { + operations = append(operations, + map[string]any{ + "op": "add", + "path": "/metadata/annotations", + "value": map[string]any{}, + }, + ) + } + + for ix := range kv { + if ix%2 != 0 { + continue + } + + k := kv[ix] + v := kv[ix+1] + + if service.Annotations != nil && service.Annotations[k] == v { + continue + } + + // https://www.rfc-editor.org/rfc/rfc6901#section-3 + k = strings.ReplaceAll(k, "~", "~0") + k = strings.ReplaceAll(k, "/", "~1") + + path := fmt.Sprintf("/metadata/annotations/%s", k) + + operations = append(operations, map[string]any{ + "op": "add", + 
"path": path, + "value": v, + }) + } + + if len(operations) == 0 { + return nil + } + + return PatchService(ctx, client, service, operations) +} + +// PatchServices applies the given patch operations on the given service +func PatchService( + ctx context.Context, + client kubernetes.Interface, + service *v1.Service, + operations []map[string]any, +) error { + + patch, err := json.Marshal(&operations) + if err != nil { + return fmt.Errorf("failed to encode patch operations: %w", err) + } + + _, err = client.CoreV1().Services(service.Namespace).Patch( + ctx, + service.Name, + types.JSONPatchType, + patch, + metav1.PatchOptions{}, + ) + + if err != nil { + return fmt.Errorf( + "failed to apply patch to %s: %w", service.Name, err) + } + + return nil +} + +// PatchServiceExternalTrafficPolicy patches the external traffic policy of +// the given service +func PatchServiceExternalTrafficPolicy( + ctx context.Context, + client kubernetes.Interface, + service *v1.Service, + policy v1.ServiceExternalTrafficPolicy, +) error { + + if service.Spec.ExternalTrafficPolicy == policy { + return nil + } + + operations := []map[string]any{ + { + "op": "replace", + "path": "/spec/externalTrafficPolicy", + "value": string(policy), + }, + } + + return PatchService(ctx, client, service, operations) +} diff --git a/pkg/cloudscale_ccm/limiter.go b/pkg/internal/limiter/limiter.go similarity index 52% rename from pkg/cloudscale_ccm/limiter.go rename to pkg/internal/limiter/limiter.go index a8208f5..9a880bd 100644 --- a/pkg/cloudscale_ccm/limiter.go +++ b/pkg/internal/limiter/limiter.go @@ -1,23 +1,31 @@ -package cloudscale_ccm +package limiter import "fmt" -// limiter is used to wrap slice responses with functions to assert that +// Limiter is used to wrap slice responses with functions to assert that // an expected number of elements was found. 
-type limiter[T any] struct { +type Limiter[T any] struct { Error error elements []T } -func newLimiter[T any](err error, elements ...T) *limiter[T] { - return &limiter[T]{ +func New[T any](err error, elements ...T) *Limiter[T] { + return &Limiter[T]{ Error: err, elements: elements, } } -// one returns exactly one item, or an error. -func (t *limiter[T]) one() (*T, error) { +// All returns the full set of answers +func (t *Limiter[T]) All() ([]T, error) { + if t.Error != nil { + return nil, t.Error + } + return t.elements, nil +} + +// One returns exactly one item, or an error. +func (t *Limiter[T]) One() (*T, error) { if t.Error != nil { return nil, t.Error } @@ -30,8 +38,8 @@ func (t *limiter[T]) one() (*T, error) { return &t.elements[0], nil } -// none returns nil if there is no element, or an error -func (t *limiter[T]) none() error { +// None returns nil if there is no element, or an error +func (t *Limiter[T]) None() error { if t.Error != nil { return t.Error } @@ -41,8 +49,8 @@ func (t *limiter[T]) none() error { return nil } -// atMostOne returns no item (nil) or one, or fails with an error -func (t *limiter[T]) atMostOne() (*T, error) { +// AtMostOne returns no item (nil) or one, or fails with an error +func (t *Limiter[T]) AtMostOne() (*T, error) { if t.Error != nil { return nil, t.Error } diff --git a/pkg/internal/limiter/limiter_test.go b/pkg/internal/limiter/limiter_test.go new file mode 100644 index 0000000..233ce8b --- /dev/null +++ b/pkg/internal/limiter/limiter_test.go @@ -0,0 +1,80 @@ +package limiter + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestError(t *testing.T) { + lim := New[string](errors.New("fail"), "foo") + + v, err := lim.One() + assert.Error(t, err) + assert.Nil(t, v) + + _, err = lim.All() + assert.Error(t, err) + + err = lim.None() + assert.Error(t, err) +} + +func TestFoundOne(t *testing.T) { + lim := New[string](nil, "foo") + + v, err := lim.One() + assert.NoError(t, err) + 
assert.Equal(t, "foo", *v) +} + +func TestNotFoundOne(t *testing.T) { + lim := New[string](nil) + + v, err := lim.One() + assert.Error(t, err) + assert.Nil(t, v) +} + +func TestAtMostOneEmpty(t *testing.T) { + lim := New[string](nil) + + v, err := lim.AtMostOne() + assert.NoError(t, err) + assert.Nil(t, v) +} + +func TestAtMostOne(t *testing.T) { + lim := New[string](nil, "foo") + + v, err := lim.AtMostOne() + assert.NoError(t, err) + assert.Equal(t, "foo", *v) +} + +func TestAtMostOneTooMany(t *testing.T) { + lim := New[string](nil, "foo", "bar") + + v, err := lim.AtMostOne() + assert.Error(t, err) + assert.Nil(t, v) +} + +func TestNone(t *testing.T) { + lim := New[string](nil) + assert.Nil(t, lim.None()) +} + +func TestNoneNotEmpty(t *testing.T) { + lim := New[string](nil, "foo") + assert.Error(t, lim.None()) +} + +func TestAll(t *testing.T) { + lim := New[string](nil, "foo", "bar") + + v, err := lim.All() + assert.NoError(t, err) + assert.Equal(t, []string{"foo", "bar"}, v) +} diff --git a/pkg/internal/testkit/api.go b/pkg/internal/testkit/api.go index 9c3d574..185f425 100644 --- a/pkg/internal/testkit/api.go +++ b/pkg/internal/testkit/api.go @@ -3,19 +3,21 @@ package testkit import ( "encoding/json" "fmt" + "io" "net/http" "net/http/httptest" "net/url" - "github.com/cloudscale-ch/cloudscale-go-sdk" + "github.com/cloudscale-ch/cloudscale-go-sdk/v4" ) // MockAPIServer is a mock http server that builds on httptest.Server and // http.ServeMux and provides methods to easily return mocked cloudscale API // responses. type MockAPIServer struct { - mux *http.ServeMux - server *httptest.Server + mux *http.ServeMux + server *httptest.Server + lastsent []byte } func NewMockAPIServer() *MockAPIServer { @@ -25,7 +27,7 @@ func NewMockAPIServer() *MockAPIServer { } } -// On matches the given patter and returns a status and the given data. The +// On matches the given pattern and returns a status and the given data. 
The // data can be a string or anything that go can marshal into a JSON. // // The servrer adds a default route that respods with an empty JSON object @@ -42,11 +44,16 @@ func (m *MockAPIServer) On(pattern string, status int, data any) { m.mux.HandleFunc(pattern, func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(status) + if status == 404 { + fmt.Println("Not handled: {}", r.URL) + } + var ( body []byte err error ) + // Turn response data into a JSON switch v := data.(type) { case string: body = []byte(v) @@ -57,9 +64,22 @@ func (m *MockAPIServer) On(pattern string, status int, data any) { } } - _, err = w.Write(body) - if err != nil { - panic(fmt.Sprintf("failed to write body for %s: %s", pattern, err)) + // Write response data + if len(body) > 0 { + _, err = w.Write(body) + if err != nil { + panic(fmt.Sprintf( + "failed to write body for %s: %s", pattern, err)) + } + } + + // Capture JSON that was sent for PUT/POST + if r.Method == "POST" || r.Method == "PUT" || r.Method == "PATCH" { + data, err := io.ReadAll(r.Body) + if err != nil { + panic(fmt.Sprintf("failed read request %s: %s", pattern, err)) + } + m.lastsent = data } }) } @@ -76,6 +96,18 @@ func (m *MockAPIServer) WithServers(servers []cloudscale.Server) { } } +// WithLoadBalancers ensures that the /v1/loadbalancers endpoints respond with +// the given loadbalancer objects. In addition to /v1/loadbalancers, this also +// implements /v1/loadbalancers/ for any loadbalancer with a UUID. +func (m *MockAPIServer) WithLoadBalancers(lbs []cloudscale.LoadBalancer) { + m.On("/v1/load-balancers", 200, lbs) + for _, lb := range lbs { + if lb.UUID != "" { + m.On(fmt.Sprintf("/v1/load-balancers/%s", lb.UUID), 200, lb) + } + } +} + // Client returns a cloudscale client pointing at the mock API server. 
func (m *MockAPIServer) Client() *cloudscale.Client { if m.server == nil { @@ -89,6 +121,14 @@ func (m *MockAPIServer) Client() *cloudscale.Client { return client } +// LastSent unmarshals the JSON last sent to the API server via POST/PUT/PATCH. +func (m *MockAPIServer) LastSent(v any) { + err := json.Unmarshal(m.lastsent, v) + if err != nil { + panic(fmt.Sprintf("failed to unmarshal: %s", m.lastsent)) + } +} + // Start runs the server in the background, until it is stopped/closed. func (m *MockAPIServer) Start() { if m.server != nil { diff --git a/pkg/internal/testkit/service.go b/pkg/internal/testkit/service.go new file mode 100644 index 0000000..dcb185f --- /dev/null +++ b/pkg/internal/testkit/service.go @@ -0,0 +1,26 @@ +package testkit + +import ( + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Service helps construct Kubernetes v1.Service objects for testing +type Service struct { + Name string +} + +func NewService(name string) *Service { + return &Service{Name: name} +} + +func (s *Service) V1() *v1.Service { + return &v1.Service{ + ObjectMeta: metav1.ObjectMeta{Name: s.Name}, + } +} + +func (s *Service) WithName(name string) *Service { + s.Name = name + return s +} diff --git a/pkg/internal/testkit/tcp.go b/pkg/internal/testkit/tcp.go new file mode 100644 index 0000000..faa2cce --- /dev/null +++ b/pkg/internal/testkit/tcp.go @@ -0,0 +1,27 @@ +package testkit + +import ( + "bufio" + "errors" + "fmt" + "io" + "net" +) + +func TCPRead(addr string, port int32) (string, error) { + conn, err := net.Dial("tcp", fmt.Sprintf("%s:%d", addr, port)) + if err != nil { + return "", fmt.Errorf( + "failed to connect to %s:%d: %w", addr, port, err) + } + defer conn.Close() + + reader := bufio.NewReader(conn) + line, err := reader.ReadString('\n') + if err != nil && !errors.Is(err, io.EOF) { + return "", fmt.Errorf( + "failed to read from %s:%d: %w", addr, port, err) + } + + return line, nil +}