diff --git a/go.mod b/go.mod index 68312db7cd..0aba506ba1 100644 --- a/go.mod +++ b/go.mod @@ -31,7 +31,7 @@ require ( github.com/moby/term v0.5.0 github.com/opencontainers/go-digest v1.0.0 github.com/opencontainers/image-spec v1.1.0-rc5 - github.com/openshift/api v0.0.0-20231010075512-1ccc6058c62d + github.com/openshift/api v0.0.0-20231207204216-5efc6fca4b2d github.com/openshift/build-machinery-go v0.0.0-20230824093055-6a18da01283c github.com/openshift/client-go v0.0.0-20230926161409-848405da69e1 github.com/openshift/library-go v0.0.0-20231016155954-11c72a39f742 @@ -42,21 +42,21 @@ require ( github.com/spf13/cobra v1.7.0 github.com/spf13/pflag v1.0.5 github.com/vincent-petithory/dataurl v1.0.0 - golang.org/x/crypto v0.15.0 - golang.org/x/net v0.18.0 - golang.org/x/sys v0.14.0 + golang.org/x/crypto v0.16.0 + golang.org/x/net v0.19.0 + golang.org/x/sys v0.15.0 gopkg.in/yaml.v3 v3.0.1 - k8s.io/api v0.28.2 + k8s.io/api v0.28.4 k8s.io/apiextensions-apiserver v0.28.2 - k8s.io/apimachinery v0.28.2 + k8s.io/apimachinery v0.28.4 k8s.io/apiserver v0.28.2 k8s.io/cli-runtime v0.28.2 k8s.io/client-go v0.28.2 k8s.io/component-base v0.28.2 - k8s.io/klog/v2 v2.100.1 + k8s.io/klog/v2 v2.110.1 k8s.io/kubectl v0.28.2 k8s.io/pod-security-admission v0.28.2 - k8s.io/utils v0.0.0-20230726121419-3b25d923346b + k8s.io/utils v0.0.0-20231127182322-b307cd553661 sigs.k8s.io/yaml v1.3.0 ) @@ -91,7 +91,7 @@ require ( github.com/go-git/gcfg v1.5.0 // indirect github.com/go-git/go-billy/v5 v5.1.0 // indirect github.com/go-git/go-git/v5 v5.3.0 // indirect - github.com/go-logr/logr v1.2.4 // indirect + github.com/go-logr/logr v1.3.0 // indirect github.com/go-openapi/analysis v0.21.4 // indirect github.com/go-openapi/errors v0.20.4 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect @@ -177,7 +177,7 @@ require ( golang.org/x/mod v0.13.0 // indirect golang.org/x/oauth2 v0.14.0 // indirect golang.org/x/sync v0.5.0 // indirect - golang.org/x/term v0.14.0 // indirect + golang.org/x/term v0.15.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.3.0 // indirect golang.org/x/tools v0.14.0 // indirect @@ -196,7 +196,7 @@ require ( sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 // indirect sigs.k8s.io/kustomize/kustomize/v5 v5.0.4-0.20230601165947-6ce0bf390ce3 // indirect sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect ) replace github.com/apcera/gssapi => github.com/openshift/gssapi v0.0.0-20161010215902-5fb4217df13b diff --git a/go.sum b/go.sum index 50f0b5d67a..27af23702a 100644 --- a/go.sum +++ b/go.sum @@ -143,9 +143,8 @@ github.com/go-ldap/ldap/v3 v3.4.3 h1:JCKUtJPIcyOuG7ctGabLKMgIlKnGumD/iGjuWeEruDI github.com/go-ldap/ldap/v3 v3.4.3/go.mod h1:7LdHfVt6iIOESVEe3Bs4Jp2sHEKgDeduAhgM1/f9qmo= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= -github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= +github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/zapr v1.2.3 
h1:a9vnzlIBPQBBkeaR9IuMUfmVOrQlkoC4YfPoFkX3T7A= github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY= github.com/go-openapi/analysis v0.21.4 h1:ZDFLvSNxpDaomuCueM0BlSXxpANBlFYiBvr+GXrvIHc= @@ -266,6 +265,7 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-containerregistry v0.16.1 h1:rUEt426sR6nyrL3gt+18ibRcvYpKYdpsa5ZW7MA08dQ= @@ -404,8 +404,8 @@ github.com/opencontainers/runc v1.1.10 h1:EaL5WeO9lv9wmS6SASjszOeQdSctvpbu0DdBQB github.com/opencontainers/runc v1.1.10/go.mod h1:+/R6+KmDlh+hOO8NkjmgkG9Qzvypzk0yXxAPYYR65+M= github.com/opencontainers/runtime-spec v1.1.0 h1:HHUyrt9mwHUjtasSbXSMvs4cyFxh+Bll4AjJ9odEGpg= github.com/opencontainers/runtime-spec v1.1.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/openshift/api v0.0.0-20231010075512-1ccc6058c62d h1:KZRCA2bExuO7wU5MSvDu2Cs9qe9nFUTh3ITReJp7iaQ= -github.com/openshift/api v0.0.0-20231010075512-1ccc6058c62d/go.mod h1:qNtV0315F+f8ld52TLtPvrfivZpdimOzTi3kn9IVbtU= +github.com/openshift/api v0.0.0-20231207204216-5efc6fca4b2d h1:DJxRHF5883tyG1Lf/5kQB4Ut2ycutC7tpGIaXMLKzYk= +github.com/openshift/api v0.0.0-20231207204216-5efc6fca4b2d/go.mod h1:qNtV0315F+f8ld52TLtPvrfivZpdimOzTi3kn9IVbtU= github.com/openshift/build-machinery-go v0.0.0-20230824093055-6a18da01283c h1:H5k87xq6hGgR1YCF/8hLv3j5jWd64Eh3ZhqF9WUJ15Q= github.com/openshift/build-machinery-go v0.0.0-20230824093055-6a18da01283c/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE= github.com/openshift/client-go v0.0.0-20230926161409-848405da69e1 h1:W1N/3nVciqmjPjn2xldHjb0AwwCQzlGxLvX5BCgE8H4= @@ -546,8 +546,8 @@ golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220331220935-ae2d96664a29/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.15.0 h1:frVn1TEaCEaZcn3Tmd7Y2b5KKPaZ+I32Q2OA3kYp5TA= -golang.org/x/crypto v0.15.0/go.mod h1:4ChreQoLWfG3xLDer1WdlH5NdlQ3+mwnQq1YTKY+72g= +golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY= +golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190419195159-b8972e603456/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI= @@ -578,8 +578,8 @@ golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1 golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net 
v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.18.0 h1:mIYleuAkSbHh0tCv7RvjL3F6ZVbLjq4+R7zbOn3Kokg= -golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ= +golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= +golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.14.0 h1:P0Vrf/2538nmC0H+pEQ3MNFRRnVR7RlqyVw+bvm26z0= golang.org/x/oauth2 v0.14.0/go.mod h1:lAtNWgaWfL4cm7j2OV8TxGi9Qb7ECORx8DktCY74OwM= @@ -623,14 +623,14 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= -golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.14.0 h1:LGK9IlZ8T9jvdy6cTdfKUCltatMFOehAQo9SRC46UQ8= -golang.org/x/term v0.14.0/go.mod h1:TySc+nGkYR6qt8km8wUhuFRTVSMIX3XPR58y2lC8vww= +golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= +golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -717,12 +717,12 @@ gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -k8s.io/api v0.28.2 h1:9mpl5mOb6vXZvqbQmankOfPIGiudghwCoLl1EYfUZbw= -k8s.io/api v0.28.2/go.mod h1:RVnJBsjU8tcMq7C3iaRSGMeaKt2TWEUXcpIt/90fjEg= +k8s.io/api v0.28.4 h1:8ZBrLjwosLl/NYgv1P7EQLqoO8MGQApnbgH8tu3BMzY= +k8s.io/api v0.28.4/go.mod h1:axWTGrY88s/5YE+JSt4uUi6NMM+gur1en2REMR7IRj0= k8s.io/apiextensions-apiserver v0.28.2 h1:J6/QRWIKV2/HwBhHRVITMLYoypCoPY1ftigDM0Kn+QU= k8s.io/apiextensions-apiserver v0.28.2/go.mod h1:5tnkxLGa9nefefYzWuAlWZ7RZYuN/765Au8cWLA6SRg= -k8s.io/apimachinery v0.28.2 h1:KCOJLrc6gu+wV1BYgwik4AF4vXOlVJPdiqn0yAWWwXQ= -k8s.io/apimachinery v0.28.2/go.mod h1:RdzF87y/ngqk9H4z3EL2Rppv5jj95vGS/HaFXrLDApU= +k8s.io/apimachinery v0.28.4 h1:zOSJe1mc+GxuMnFzD4Z/U1wst50X28ZNsn5bhgIIao8= +k8s.io/apimachinery v0.28.4/go.mod h1:wI37ncBvfAoswfq626yPTe6Bz1c22L7uaJ8dho83mgg= k8s.io/apiserver v0.28.2 
h1:rBeYkLvF94Nku9XfXyUIirsVzCzJBs6jMn3NWeHieyI= k8s.io/apiserver v0.28.2/go.mod h1:f7D5e8wH8MWcKD7azq6Csw9UN+CjdtXIVQUyUhrtb+E= k8s.io/cli-runtime v0.28.2 h1:64meB2fDj10/ThIMEJLO29a1oujSm0GQmKzh1RtA/uk= @@ -733,8 +733,8 @@ k8s.io/component-base v0.28.2 h1:Yc1yU+6AQSlpJZyvehm/NkJBII72rzlEsd6MkBQ+G0E= k8s.io/component-base v0.28.2/go.mod h1:4IuQPQviQCg3du4si8GpMrhAIegxpsgPngPRR/zWpzc= k8s.io/component-helpers v0.28.2 h1:r/XJ265PMirW9EcGXr/F+2yWrLPo2I69KdvcY/h9HAo= k8s.io/component-helpers v0.28.2/go.mod h1:pF1R5YWQ+sgf0i6EbVm+MQCzkYuqutDUibdrkvAa6aI= -k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= -k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= +k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= k8s.io/kube-aggregator v0.28.2 h1:tCjAfB1p/v18yD2NpegNQRuahzyA/szFfcRARnpjDeo= k8s.io/kube-aggregator v0.28.2/go.mod h1:g4hZVjC4KhJtZHV2pyiRBiU6AdBA/sAjh9Y9GJC/SbU= k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ= @@ -745,8 +745,8 @@ k8s.io/metrics v0.28.2 h1:Z/oMk5SmiT/Ji1SaWOPfW2l9W831BLO9/XxDq9iS3ak= k8s.io/metrics v0.28.2/go.mod h1:QTIIdjMrq+KodO+rmp6R9Pr1LZO8kTArNtkWoQXw0sw= k8s.io/pod-security-admission v0.28.2 h1:3kiOL+gc6auNTGHuQ0hVsGxYu2YO/7DZb0xYR84GxiQ= k8s.io/pod-security-admission v0.28.2/go.mod h1:gReea39xbhIzf4Ry0FDuiTi8uj1N5R9YXOh8zQSuTxs= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20231127182322-b307cd553661 h1:FepOBzJ0GXm8t0su67ln2wAZjbQ6RxQGZDnzuLcrUTI= +k8s.io/utils v0.0.0-20231127182322-b307cd553661/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96 h1:PFWFSkpArPNJxFX4ZKWAk9NSeRoZaXschn+ULa4xVek= @@ -757,7 +757,7 @@ sigs.k8s.io/kustomize/kustomize/v5 v5.0.4-0.20230601165947-6ce0bf390ce3 h1:vq2Tt sigs.k8s.io/kustomize/kustomize/v5 v5.0.4-0.20230601165947-6ce0bf390ce3/go.mod h1:/d88dHCvoy7d0AKFT0yytezSGZKjsZBVs9YTkBHSGFk= sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 h1:W6cLQc5pnqM7vh3b7HvGNfXrJ/xL6BDMS0v1V/HHg5U= sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3/go.mod h1:JWP1Fj0VWGHyw3YUPjXSQnRnrwezrZSrApfX5S0nIag= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/pkg/helpers/describe/describer_test.go b/pkg/helpers/describe/describer_test.go index 6c5f305a0c..6119022085 100644 --- a/pkg/helpers/describe/describer_test.go +++ b/pkg/helpers/describe/describer_test.go @@ -124,6 +124,7 @@ var MissingDescriberGroupCoverageExceptions = []schema.GroupVersion{ {Group: 
"samples.operator.openshift.io", Version: "v1"}, {Group: "cloud.network.openshift.io", Version: "v1"}, + {Group: "network.openshift.io", Version: "v1alpha1"}, {Group: "apiserver.openshift.io", Version: "v1"}, diff --git a/vendor/github.com/go-logr/logr/README.md b/vendor/github.com/go-logr/logr/README.md index ab59311813..a8c29bfbd5 100644 --- a/vendor/github.com/go-logr/logr/README.md +++ b/vendor/github.com/go-logr/logr/README.md @@ -1,6 +1,7 @@ # A minimal logging API for Go [![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/logr.svg)](https://pkg.go.dev/github.com/go-logr/logr) +[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/go-logr/logr/badge)](https://securityscorecards.dev/viewer/?platform=github.com&org=go-logr&repo=logr) logr offers an(other) opinion on how Go programs and libraries can do logging without becoming coupled to a particular logging implementation. This is not @@ -73,6 +74,29 @@ received: If the Go standard library had defined an interface for logging, this project probably would not be needed. Alas, here we are. +When the Go developers started developing such an interface with +[slog](https://github.com/golang/go/issues/56345), they adopted some of the +logr design but also left out some parts and changed others: + +| Feature | logr | slog | +|---------|------|------| +| High-level API | `Logger` (passed by value) | `Logger` (passed by [pointer](https://github.com/golang/go/issues/59126)) | +| Low-level API | `LogSink` | `Handler` | +| Stack unwinding | done by `LogSink` | done by `Logger` | +| Skipping helper functions | `WithCallDepth`, `WithCallStackHelper` | [not supported by Logger](https://github.com/golang/go/issues/59145) | +| Generating a value for logging on demand | `Marshaler` | `LogValuer` | +| Log levels | >= 0, higher meaning "less important" | positive and negative, with 0 for "info" and higher meaning "more important" | +| Error log entries | always logged, don't have a verbosity level | normal log entries with level >= `LevelError` | +| Passing logger via context | `NewContext`, `FromContext` | no API | +| Adding a name to a logger | `WithName` | no API | +| Modify verbosity of log entries in a call chain | `V` | no API | +| Grouping of key/value pairs | not supported | `WithGroup`, `GroupValue` | + +The high-level slog API is explicitly meant to be one of many different APIs +that can be layered on top of a shared `slog.Handler`. logr is one such +alternative API, with [interoperability](#slog-interoperability) provided by the [`slogr`](slogr) +package. + ### Inspiration Before you consider this package, please read [this blog post by the @@ -118,6 +142,91 @@ There are implementations for the following logging libraries: - **github.com/go-kit/log**: [gokitlogr](https://github.com/tonglil/gokitlogr) (also compatible with github.com/go-kit/kit/log since v0.12.0) - **bytes.Buffer** (writing to a buffer): [bufrlogr](https://github.com/tonglil/buflogr) (useful for ensuring values were logged, like during testing) +## slog interoperability + +Interoperability goes both ways, using the `logr.Logger` API with a `slog.Handler` +and using the `slog.Logger` API with a `logr.LogSink`. [slogr](./slogr) provides `NewLogr` and +`NewSlogHandler` API calls to convert between a `logr.Logger` and a `slog.Handler`. +As usual, `slog.New` can be used to wrap such a `slog.Handler` in the high-level +slog API. `slogr` itself leaves that to the caller. 
+ +## Using a `logr.Sink` as backend for slog + +Ideally, a logr sink implementation should support both logr and slog by +implementing both the normal logr interface(s) and `slogr.SlogSink`. Because +of a conflict in the parameters of the common `Enabled` method, it is [not +possible to implement both slog.Handler and logr.Sink in the same +type](https://github.com/golang/go/issues/59110). + +If both are supported, log calls can go from the high-level APIs to the backend +without the need to convert parameters. `NewLogr` and `NewSlogHandler` can +convert back and forth without adding additional wrappers, with one exception: +when `Logger.V` was used to adjust the verbosity for a `slog.Handler`, then +`NewSlogHandler` has to use a wrapper which adjusts the verbosity for future +log calls. + +Such an implementation should also support values that implement specific +interfaces from both packages for logging (`logr.Marshaler`, `slog.LogValuer`, +`slog.GroupValue`). logr does not convert those. + +Not supporting slog has several drawbacks: +- Recording source code locations works correctly if the handler gets called + through `slog.Logger`, but may be wrong in other cases. That's because a + `logr.Sink` does its own stack unwinding instead of using the program counter + provided by the high-level API. +- slog levels <= 0 can be mapped to logr levels by negating the level without a + loss of information. But all slog levels > 0 (e.g. `slog.LevelWarning` as + used by `slog.Logger.Warn`) must be mapped to 0 before calling the sink + because logr does not support "more important than info" levels. +- The slog group concept is supported by prefixing each key in a key/value + pair with the group names, separated by a dot. For structured output like + JSON it would be better to group the key/value pairs inside an object. +- Special slog values and interfaces don't work as expected. +- The overhead is likely to be higher. + +These drawbacks are severe enough that applications using a mixture of slog and +logr should switch to a different backend. + +## Using a `slog.Handler` as backend for logr + +Using a plain `slog.Handler` without support for logr works better than the +other direction: +- All logr verbosity levels can be mapped 1:1 to their corresponding slog level + by negating them. +- Stack unwinding is done by the `slogr.SlogSink` and the resulting program + counter is passed to the `slog.Handler`. +- Names added via `Logger.WithName` are gathered and recorded in an additional + attribute with `logger` as key and the names separated by slash as value. +- `Logger.Error` is turned into a log record with `slog.LevelError` as level + and an additional attribute with `err` as key, if an error was provided. + +The main drawback is that `logr.Marshaler` will not be supported. Types should +ideally support both `logr.Marshaler` and `slog.Valuer`. If compatibility +with logr implementations without slog support is not important, then +`slog.Valuer` is sufficient. + +## Context support for slog + +Storing a logger in a `context.Context` is not supported by +slog. 
`logr.NewContext` and `logr.FromContext` can be used with slog like this +to fill this gap: + + func HandlerFromContext(ctx context.Context) slog.Handler { + logger, err := logr.FromContext(ctx) + if err == nil { + return slogr.NewSlogHandler(logger) + } + return slog.Default().Handler() + } + + func ContextWithHandler(ctx context.Context, handler slog.Handler) context.Context { + return logr.NewContext(ctx, slogr.NewLogr(handler)) + } + +The downside is that storing and retrieving a `slog.Handler` needs more +allocations compared to using a `logr.Logger`. Therefore the recommendation is +to use the `logr.Logger` API in code which uses contextual logging. + ## FAQ ### Conceptual @@ -241,7 +350,9 @@ Otherwise, you can start out with `0` as "you always want to see this", Then gradually choose levels in between as you need them, working your way down from 10 (for debug and trace style logs) and up from 1 (for chattier -info-type logs.) +info-type logs). For reference, slog pre-defines -4 for debug logs +(corresponds to 4 in logr), which matches what is +[recommended for Kubernetes](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md#what-method-to-use). #### How do I choose my keys? diff --git a/vendor/github.com/go-logr/logr/SECURITY.md b/vendor/github.com/go-logr/logr/SECURITY.md new file mode 100644 index 0000000000..1ca756fc7b --- /dev/null +++ b/vendor/github.com/go-logr/logr/SECURITY.md @@ -0,0 +1,18 @@ +# Security Policy + +If you have discovered a security vulnerability in this project, please report it +privately. **Do not disclose it as a public issue.** This gives us time to work with you +to fix the issue before public exposure, reducing the chance that the exploit will be +used before a patch is released. + +You may submit the report in the following ways: + +- send an email to go-logr-security@googlegroups.com +- send us a [private vulnerability report](https://github.com/go-logr/logr/security/advisories/new) + +Please provide the following information in your report: + +- A description of the vulnerability and its impact +- How to reproduce the issue + +We ask that you give us 90 days to work on a fix before public exposure. diff --git a/vendor/github.com/go-logr/logr/logr.go b/vendor/github.com/go-logr/logr/logr.go index e027aea3fd..2a5075a180 100644 --- a/vendor/github.com/go-logr/logr/logr.go +++ b/vendor/github.com/go-logr/logr/logr.go @@ -127,9 +127,9 @@ limitations under the License. // such a value can call its methods without having to check whether the // instance is ready for use. // -// Calling methods with the null logger (Logger{}) as instance will crash -// because it has no LogSink. Therefore this null logger should never be passed -// around. For cases where passing a logger is optional, a pointer to Logger +// The zero logger (= Logger{}) is identical to Discard() and discards all log +// entries. Code that receives a Logger by value can simply call it, the methods +// will never crash. For cases where passing a logger is optional, a pointer to Logger // should be used. // // # Key Naming Conventions @@ -258,6 +258,12 @@ type Logger struct { // Enabled tests whether this Logger is enabled. For example, commandline // flags might be used to set the logging verbosity and disable some info logs. func (l Logger) Enabled() bool { + // Some implementations of LogSink look at the caller in Enabled (e.g. + // different verbosity levels per package or file), but we only pass one + // CallDepth in (via Init). 
This means that all calls from Logger to the + // LogSink's Enabled, Info, and Error methods must have the same number of + // frames. In other words, Logger methods can't call other Logger methods + // which call these LogSink methods unless we do it the same in all paths. return l.sink != nil && l.sink.Enabled(l.level) } @@ -267,11 +273,11 @@ func (l Logger) Enabled() bool { // line. The key/value pairs can then be used to add additional variable // information. The key/value pairs must alternate string keys and arbitrary // values. -func (l Logger) Info(msg string, keysAndValues ...interface{}) { +func (l Logger) Info(msg string, keysAndValues ...any) { if l.sink == nil { return } - if l.Enabled() { + if l.sink.Enabled(l.level) { // see comment in Enabled if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { withHelper.GetCallStackHelper()() } @@ -289,7 +295,7 @@ func (l Logger) Info(msg string, keysAndValues ...interface{}) { // while the err argument should be used to attach the actual error that // triggered this log line, if present. The err parameter is optional // and nil may be passed instead of an error instance. -func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) { +func (l Logger) Error(err error, msg string, keysAndValues ...any) { if l.sink == nil { return } @@ -314,9 +320,16 @@ func (l Logger) V(level int) Logger { return l } +// GetV returns the verbosity level of the logger. If the logger's LogSink is +// nil as in the Discard logger, this will always return 0. +func (l Logger) GetV() int { + // 0 if l.sink nil because of the if check in V above. + return l.level +} + // WithValues returns a new Logger instance with additional key/value pairs. // See Info for documentation on how key/value pairs work. -func (l Logger) WithValues(keysAndValues ...interface{}) Logger { +func (l Logger) WithValues(keysAndValues ...any) Logger { if l.sink == nil { return l } @@ -467,15 +480,15 @@ type LogSink interface { // The level argument is provided for optional logging. This method will // only be called when Enabled(level) is true. See Logger.Info for more // details. - Info(level int, msg string, keysAndValues ...interface{}) + Info(level int, msg string, keysAndValues ...any) // Error logs an error, with the given message and key/value pairs as // context. See Logger.Error for more details. - Error(err error, msg string, keysAndValues ...interface{}) + Error(err error, msg string, keysAndValues ...any) // WithValues returns a new LogSink with additional key/value pairs. See // Logger.WithValues for more details. - WithValues(keysAndValues ...interface{}) LogSink + WithValues(keysAndValues ...any) LogSink // WithName returns a new LogSink with the specified name appended. See // Logger.WithName for more details. @@ -546,5 +559,5 @@ type Marshaler interface { // with exported fields // // It may return any value of any type. - MarshalLog() interface{} + MarshalLog() any } diff --git a/vendor/github.com/go-logr/logr/slogr/sloghandler.go b/vendor/github.com/go-logr/logr/slogr/sloghandler.go new file mode 100644 index 0000000000..ec6725ce2c --- /dev/null +++ b/vendor/github.com/go-logr/logr/slogr/sloghandler.go @@ -0,0 +1,168 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2023 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package slogr + +import ( + "context" + "log/slog" + + "github.com/go-logr/logr" +) + +type slogHandler struct { + // May be nil, in which case all logs get discarded. + sink logr.LogSink + // Non-nil if sink is non-nil and implements SlogSink. + slogSink SlogSink + + // groupPrefix collects values from WithGroup calls. It gets added as + // prefix to value keys when handling a log record. + groupPrefix string + + // levelBias can be set when constructing the handler to influence the + // slog.Level of log records. A positive levelBias reduces the + // slog.Level value. slog has no API to influence this value after the + // handler got created, so it can only be set indirectly through + // Logger.V. + levelBias slog.Level +} + +var _ slog.Handler = &slogHandler{} + +// groupSeparator is used to concatenate WithGroup names and attribute keys. +const groupSeparator = "." + +// GetLevel is used for black box unit testing. +func (l *slogHandler) GetLevel() slog.Level { + return l.levelBias +} + +func (l *slogHandler) Enabled(ctx context.Context, level slog.Level) bool { + return l.sink != nil && (level >= slog.LevelError || l.sink.Enabled(l.levelFromSlog(level))) +} + +func (l *slogHandler) Handle(ctx context.Context, record slog.Record) error { + if l.slogSink != nil { + // Only adjust verbosity level of log entries < slog.LevelError. + if record.Level < slog.LevelError { + record.Level -= l.levelBias + } + return l.slogSink.Handle(ctx, record) + } + + // No need to check for nil sink here because Handle will only be called + // when Enabled returned true. + + kvList := make([]any, 0, 2*record.NumAttrs()) + record.Attrs(func(attr slog.Attr) bool { + if attr.Key != "" { + kvList = append(kvList, l.addGroupPrefix(attr.Key), attr.Value.Resolve().Any()) + } + return true + }) + if record.Level >= slog.LevelError { + l.sinkWithCallDepth().Error(nil, record.Message, kvList...) + } else { + level := l.levelFromSlog(record.Level) + l.sinkWithCallDepth().Info(level, record.Message, kvList...) + } + return nil +} + +// sinkWithCallDepth adjusts the stack unwinding so that when Error or Info +// are called by Handle, code in slog gets skipped. +// +// This offset currently (Go 1.21.0) works for calls through +// slog.New(NewSlogHandler(...)). There's no guarantee that the call +// chain won't change. Wrapping the handler will also break unwinding. It's +// still better than not adjusting at all.... +// +// This cannot be done when constructing the handler because NewLogr needs +// access to the original sink without this adjustment. A second copy would +// work, but then WithAttrs would have to be called for both of them. 
+func (l *slogHandler) sinkWithCallDepth() logr.LogSink { + if sink, ok := l.sink.(logr.CallDepthLogSink); ok { + return sink.WithCallDepth(2) + } + return l.sink +} + +func (l *slogHandler) WithAttrs(attrs []slog.Attr) slog.Handler { + if l.sink == nil || len(attrs) == 0 { + return l + } + + copy := *l + if l.slogSink != nil { + copy.slogSink = l.slogSink.WithAttrs(attrs) + copy.sink = copy.slogSink + } else { + kvList := make([]any, 0, 2*len(attrs)) + for _, attr := range attrs { + if attr.Key != "" { + kvList = append(kvList, l.addGroupPrefix(attr.Key), attr.Value.Resolve().Any()) + } + } + copy.sink = l.sink.WithValues(kvList...) + } + return &copy +} + +func (l *slogHandler) WithGroup(name string) slog.Handler { + if l.sink == nil { + return l + } + copy := *l + if l.slogSink != nil { + copy.slogSink = l.slogSink.WithGroup(name) + copy.sink = l.slogSink + } else { + copy.groupPrefix = copy.addGroupPrefix(name) + } + return &copy +} + +func (l *slogHandler) addGroupPrefix(name string) string { + if l.groupPrefix == "" { + return name + } + return l.groupPrefix + groupSeparator + name +} + +// levelFromSlog adjusts the level by the logger's verbosity and negates it. +// It ensures that the result is >= 0. This is necessary because the result is +// passed to a logr.LogSink and that API did not historically document whether +// levels could be negative or what that meant. +// +// Some example usage: +// logrV0 := getMyLogger() +// logrV2 := logrV0.V(2) +// slogV2 := slog.New(slogr.NewSlogHandler(logrV2)) +// slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6) +// slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2) +// slogv2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0) +func (l *slogHandler) levelFromSlog(level slog.Level) int { + result := -level + result += l.levelBias // in case the original logr.Logger had a V level + if result < 0 { + result = 0 // because logr.LogSink doesn't expect negative V levels + } + return int(result) +} diff --git a/vendor/github.com/go-logr/logr/slogr/slogr.go b/vendor/github.com/go-logr/logr/slogr/slogr.go new file mode 100644 index 0000000000..eb519ae23f --- /dev/null +++ b/vendor/github.com/go-logr/logr/slogr/slogr.go @@ -0,0 +1,108 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2023 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package slogr enables usage of a slog.Handler with logr.Logger as front-end +// API and of a logr.LogSink through the slog.Handler and thus slog.Logger +// APIs. +// +// See the README in the top-level [./logr] package for a discussion of +// interoperability. +package slogr + +import ( + "context" + "log/slog" + + "github.com/go-logr/logr" +) + +// NewLogr returns a logr.Logger which writes to the slog.Handler. +// +// The logr verbosity level is mapped to slog levels such that V(0) becomes +// slog.LevelInfo and V(4) becomes slog.LevelDebug.
+func NewLogr(handler slog.Handler) logr.Logger { + if handler, ok := handler.(*slogHandler); ok { + if handler.sink == nil { + return logr.Discard() + } + return logr.New(handler.sink).V(int(handler.levelBias)) + } + return logr.New(&slogSink{handler: handler}) +} + +// NewSlogHandler returns a slog.Handler which writes to the same sink as the logr.Logger. +// +// The returned logger writes all records with level >= slog.LevelError as +// error log entries with LogSink.Error, regardless of the verbosity level of +// the logr.Logger: +// +// logger := +// slog.New(NewSlogHandler(logger.V(10))).Error(...) -> logSink.Error(...) +// +// The level of all other records gets reduced by the verbosity +// level of the logr.Logger and the result is negated. If it happens +// to be negative, then it gets replaced by zero because a LogSink +// is not expected to handled negative levels: +// +// slog.New(NewSlogHandler(logger)).Debug(...) -> logger.GetSink().Info(level=4, ...) +// slog.New(NewSlogHandler(logger)).Warning(...) -> logger.GetSink().Info(level=0, ...) +// slog.New(NewSlogHandler(logger)).Info(...) -> logger.GetSink().Info(level=0, ...) +// slog.New(NewSlogHandler(logger.V(4))).Info(...) -> logger.GetSink().Info(level=4, ...) +func NewSlogHandler(logger logr.Logger) slog.Handler { + if sink, ok := logger.GetSink().(*slogSink); ok && logger.GetV() == 0 { + return sink.handler + } + + handler := &slogHandler{sink: logger.GetSink(), levelBias: slog.Level(logger.GetV())} + if slogSink, ok := handler.sink.(SlogSink); ok { + handler.slogSink = slogSink + } + return handler +} + +// SlogSink is an optional interface that a LogSink can implement to support +// logging through the slog.Logger or slog.Handler APIs better. It then should +// also support special slog values like slog.Group. When used as a +// slog.Handler, the advantages are: +// +// - stack unwinding gets avoided in favor of logging the pre-recorded PC, +// as intended by slog +// - proper grouping of key/value pairs via WithGroup +// - verbosity levels > slog.LevelInfo can be recorded +// - less overhead +// +// Both APIs (logr.Logger and slog.Logger/Handler) then are supported equally +// well. Developers can pick whatever API suits them better and/or mix +// packages which use either API in the same binary with a common logging +// implementation. +// +// This interface is necessary because the type implementing the LogSink +// interface cannot also implement the slog.Handler interface due to the +// different prototype of the common Enabled method. +// +// An implementation could support both interfaces in two different types, but then +// additional interfaces would be needed to convert between those types in NewLogr +// and NewSlogHandler. +type SlogSink interface { + logr.LogSink + + Handle(ctx context.Context, record slog.Record) error + WithAttrs(attrs []slog.Attr) SlogSink + WithGroup(name string) SlogSink +} diff --git a/vendor/github.com/go-logr/logr/slogr/slogsink.go b/vendor/github.com/go-logr/logr/slogr/slogsink.go new file mode 100644 index 0000000000..6fbac561d9 --- /dev/null +++ b/vendor/github.com/go-logr/logr/slogr/slogsink.go @@ -0,0 +1,122 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2023 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package slogr + +import ( + "context" + "log/slog" + "runtime" + "time" + + "github.com/go-logr/logr" +) + +var ( + _ logr.LogSink = &slogSink{} + _ logr.CallDepthLogSink = &slogSink{} + _ Underlier = &slogSink{} +) + +// Underlier is implemented by the LogSink returned by NewLogr. +type Underlier interface { + // GetUnderlying returns the Handler used by the LogSink. + GetUnderlying() slog.Handler +} + +const ( + // nameKey is used to log the `WithName` values as an additional attribute. + nameKey = "logger" + + // errKey is used to log the error parameter of Error as an additional attribute. + errKey = "err" +) + +type slogSink struct { + callDepth int + name string + handler slog.Handler +} + +func (l *slogSink) Init(info logr.RuntimeInfo) { + l.callDepth = info.CallDepth +} + +func (l *slogSink) GetUnderlying() slog.Handler { + return l.handler +} + +func (l *slogSink) WithCallDepth(depth int) logr.LogSink { + newLogger := *l + newLogger.callDepth += depth + return &newLogger +} + +func (l *slogSink) Enabled(level int) bool { + return l.handler.Enabled(context.Background(), slog.Level(-level)) +} + +func (l *slogSink) Info(level int, msg string, kvList ...interface{}) { + l.log(nil, msg, slog.Level(-level), kvList...) +} + +func (l *slogSink) Error(err error, msg string, kvList ...interface{}) { + l.log(err, msg, slog.LevelError, kvList...) +} + +func (l *slogSink) log(err error, msg string, level slog.Level, kvList ...interface{}) { + var pcs [1]uintptr + // skip runtime.Callers, this function, Info/Error, and all helper functions above that. + runtime.Callers(3+l.callDepth, pcs[:]) + + record := slog.NewRecord(time.Now(), level, msg, pcs[0]) + if l.name != "" { + record.AddAttrs(slog.String(nameKey, l.name)) + } + if err != nil { + record.AddAttrs(slog.Any(errKey, err)) + } + record.Add(kvList...) + l.handler.Handle(context.Background(), record) +} + +func (l slogSink) WithName(name string) logr.LogSink { + if l.name != "" { + l.name = l.name + "/" + } + l.name += name + return &l +} + +func (l slogSink) WithValues(kvList ...interface{}) logr.LogSink { + l.handler = l.handler.WithAttrs(kvListToAttrs(kvList...)) + return &l +} + +func kvListToAttrs(kvList ...interface{}) []slog.Attr { + // We don't need the record itself, only its Add method. + record := slog.NewRecord(time.Time{}, 0, "", 0) + record.Add(kvList...) + attrs := make([]slog.Attr, 0, record.NumAttrs()) + record.Attrs(func(attr slog.Attr) bool { + attrs = append(attrs, attr) + return true + }) + return attrs +} diff --git a/vendor/github.com/openshift/api/Dockerfile.rhel8 b/vendor/github.com/openshift/api/Dockerfile.rhel8 index 62fa30b2c0..172c7a0c12 100644 --- a/vendor/github.com/openshift/api/Dockerfile.rhel8 +++ b/vendor/github.com/openshift/api/Dockerfile.rhel8 @@ -1,10 +1,10 @@ -FROM registry.ci.openshift.org/ocp/builder:rhel-8-golang-1.20-openshift-4.15 AS builder +FROM registry.ci.openshift.org/ocp/builder:rhel-9-golang-1.20-openshift-4.15 AS builder WORKDIR /go/src/github.com/openshift/api COPY . . 
ENV GO_PACKAGE github.com/openshift/api RUN make build --warn-undefined-variables -FROM registry.ci.openshift.org/ocp/4.14:base +FROM registry.ci.openshift.org/ocp/4.15:base-rhel9 # copy the built binaries to /usr/bin COPY --from=builder /go/src/github.com/openshift/api/render /usr/bin/ @@ -21,15 +21,13 @@ COPY operator/v1alpha1/0000_10_config-operator_01_imagecontentsourcepolicy.crd.y # these are applied by the CVO COPY manifests /manifests -# TODO copy these back when we're ready to make the switch from cluster-config-operator to here -#COPY config/v1/*_config-operator_*.yaml /manifests -#COPY quota/v1/*.crd.yaml /manifests -#COPY security/v1/*.crd.yaml /manifests -#COPY securityinternal/v1/*.crd.yaml /manifests -#COPY authorization/v1/*.crd.yaml /manifests -#COPY operator/v1alpha1/0000_10_config-operator_01_imagecontentsourcepolicy.crd.yaml /manifests -#COPY operator/v1/0000_10_config-operator_*.yaml /manifests -#COPY payload-command/empty-resources /manifests +COPY config/v1/*_config-operator_*.yaml /manifests +COPY quota/v1/*.crd.yaml /manifests +COPY security/v1/*.crd.yaml /manifests +COPY securityinternal/v1/*.crd.yaml /manifests +COPY authorization/v1/*.crd.yaml /manifests +COPY operator/v1alpha1/0000_10_config-operator_01_imagecontentsourcepolicy.crd.yaml /manifests +COPY operator/v1/0000_10_config-operator_*.yaml /manifests +COPY payload-command/empty-resources /manifests -# TODO uncomment after all the other "add a new image" steps are complete. -#LABEL io.openshift.release.operator true +LABEL io.openshift.release.operator true diff --git a/vendor/github.com/openshift/api/OWNERS b/vendor/github.com/openshift/api/OWNERS index ce5e8dc337..2e956a47dd 100644 --- a/vendor/github.com/openshift/api/OWNERS +++ b/vendor/github.com/openshift/api/OWNERS @@ -1,29 +1,19 @@ reviewers: - - adambkaplan - - abhinavdahiya - - smarterclayton - deads2k - derekwaynecarr - - eparis - JoelSpeed - - jwforres - knobunc - sjenning - mfojtik - soltysh - - sttts - bparees approvers: - bparees - deads2k - derekwaynecarr - - eparis - JoelSpeed - - jwforres - knobunc - mfojtik - sjenning - - smarterclayton - soltysh - spadgett - - sttts diff --git a/vendor/github.com/openshift/api/README.md b/vendor/github.com/openshift/api/README.md index 5ad3880be2..3d13b8c985 100644 --- a/vendor/github.com/openshift/api/README.md +++ b/vendor/github.com/openshift/api/README.md @@ -1,5 +1,7 @@ # api -The canonical location of the OpenShift API definition. This repo holds the API type definitions and serialization code used by [openshift/client-go](https://github.com/openshift/client-go) +The canonical location of the OpenShift API definition. +This repo holds the API type definitions and serialization code used by [openshift/client-go](https://github.com/openshift/client-go) +APIs in this repo ship inside OCP payloads. ## defining new APIs @@ -44,7 +46,7 @@ Since Kubernetes 1.16, every CRD created in `apiextensions.k8s.io/v1` is require These schemas are often very long and complex, and should not be written by hand. For OpenShift, we provide Makefile targets in [build-machinery-go](https://github.com/openshift/build-machinery-go/) which generate the schema, built on upstream's [controller-gen](https://github.com/kubernetes-sigs/controller-tools) tool. -If you make a change to a CRD type in this repo, simply calling `make update-codegen-crds` should regenerate all CRDs and update the manifests. 
If yours is not updated, ensure that the path to its API is included in our [calls to the Makefile targets](https://github.com/openshift/api/blob/release-4.5/Makefile#L17-L29). +If you make a change to a CRD type in this repo, simply calling `make update-codegen-crds` should regenerate all CRDs and update the manifests. If yours is not updated, ensure that the path to its API is included in our [calls to the Makefile targets](https://github.com/openshift/api/blob/release-4.5/Makefile#L17-L29), if this doesn't help try calling `make generate-with-container` for executing the generators in a controlled environment. To add this generator to another repo: 1. Vendor `github.com/openshift/build-machinery-go` diff --git a/vendor/github.com/openshift/api/build/v1/consts.go b/vendor/github.com/openshift/api/build/v1/consts.go index 3310b9e0a8..0d9c8f03b3 100644 --- a/vendor/github.com/openshift/api/build/v1/consts.go +++ b/vendor/github.com/openshift/api/build/v1/consts.go @@ -164,9 +164,11 @@ const ( StatusReasonBuildPodEvicted StatusReason = "BuildPodEvicted" ) -// env vars -// WhitelistEnvVarNames is a list of special env vars allows s2i containers -var WhitelistEnvVarNames = []string{"BUILD_LOGLEVEL", "GIT_SSL_NO_VERIFY", "HTTP_PROXY", "HTTPS_PROXY", "LANG", "NO_PROXY"} +// WhitelistEnvVarNames is a list of environment variable names that are allowed to be specified +// in a buildconfig and merged into the created build pods, the code for this is located in +// openshift/openshift-controller-manager +var WhitelistEnvVarNames = []string{"BUILD_LOGLEVEL", "GIT_SSL_NO_VERIFY", "GIT_LFS_SKIP_SMUDGE", "LANG", + "HTTP_PROXY", "HTTPS_PROXY", "NO_PROXY", "http_proxy", "https_proxy", "no_proxy"} // env vars const ( diff --git a/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion-CustomNoUpgrade.crd.yaml new file mode 100644 index 0000000000..a3d3576868 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion-CustomNoUpgrade.crd.yaml @@ -0,0 +1,503 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/495 + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + release.openshift.io/feature-set: CustomNoUpgrade + name: clusterversions.config.openshift.io +spec: + group: config.openshift.io + names: + kind: ClusterVersion + plural: clusterversions + singular: clusterversion + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.history[?(@.state=="Completed")].version + name: Version + type: string + - jsonPath: .status.conditions[?(@.type=="Available")].status + name: Available + type: string + - jsonPath: .status.conditions[?(@.type=="Progressing")].status + name: Progressing + type: string + - jsonPath: .status.conditions[?(@.type=="Progressing")].lastTransitionTime + name: Since + type: date + - jsonPath: .status.conditions[?(@.type=="Progressing")].message + name: Status + type: string + name: v1 + schema: + openAPIV3Schema: + description: "ClusterVersion is the configuration for the ClusterVersionOperator. This is where parameters related to automatic updates can be set. 
\n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec is the desired state of the cluster version - the operator will work to ensure that the desired version is applied to the cluster. + type: object + required: + - clusterID + properties: + capabilities: + description: capabilities configures the installation of optional, core cluster components. A null value here is identical to an empty object; see the child properties for default semantics. + type: object + properties: + additionalEnabledCapabilities: + description: additionalEnabledCapabilities extends the set of managed capabilities beyond the baseline defined in baselineCapabilitySet. The default is an empty set. + type: array + items: + description: ClusterVersionCapability enumerates optional, core cluster components. + type: string + enum: + - openshift-samples + - baremetal + - marketplace + - Console + - Insights + - Storage + - CSISnapshot + - NodeTuning + - MachineAPI + - Build + - DeploymentConfig + - ImageRegistry + - OperatorLifecycleManager + - CloudCredential + x-kubernetes-list-type: atomic + baselineCapabilitySet: + description: baselineCapabilitySet selects an initial set of optional capabilities to enable, which can be extended via additionalEnabledCapabilities. If unset, the cluster will choose a default, and the default may change over time. The current default is vCurrent. + type: string + enum: + - None + - v4.11 + - v4.12 + - v4.13 + - v4.14 + - v4.15 + - vCurrent + channel: + description: channel is an identifier for explicitly requesting that a non-default set of updates be applied to this cluster. The default channel will be contain stable updates that are appropriate for production clusters. + type: string + clusterID: + description: clusterID uniquely identifies this cluster. This is expected to be an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx in hexadecimal values). This is a required field. + type: string + desiredUpdate: + description: "desiredUpdate is an optional field that indicates the desired value of the cluster version. Setting this value will trigger an upgrade (if the current version does not match the desired version). The set of recommended update values is listed as part of available updates in status, and setting values outside that range may cause the upgrade to fail. \n Some of the fields are inter-related with restrictions and meanings described here. 1. image is specified, version is specified, architecture is specified. API validation error. 2. image is specified, version is specified, architecture is not specified. You should not do this. version is silently ignored and image is used. 3. 
image is specified, version is not specified, architecture is specified. API validation error. 4. image is specified, version is not specified, architecture is not specified. image is used. 5. image is not specified, version is specified, architecture is specified. version and desired architecture are used to select an image. 6. image is not specified, version is specified, architecture is not specified. version and current architecture are used to select an image. 7. image is not specified, version is not specified, architecture is specified. API validation error. 8. image is not specified, version is not specified, architecture is not specified. API validation error. \n If an upgrade fails the operator will halt and report status about the failing component. Setting the desired update value back to the previous version will cause a rollback to be attempted. Not all rollbacks will succeed." + type: object + properties: + architecture: + description: architecture is an optional field that indicates the desired value of the cluster architecture. In this context cluster architecture means either a single architecture or a multi architecture. architecture can only be set to Multi thereby only allowing updates from single to multi architecture. If architecture is set, image cannot be set and version must be set. Valid values are 'Multi' and empty. + type: string + enum: + - Multi + - "" + force: + description: force allows an administrator to update to an image that has failed verification or upgradeable checks. This option should only be used when the authenticity of the provided image has been verified out of band because the provided image will run with full administrative access to the cluster. Do not use this flag with images that comes from unknown or potentially malicious sources. + type: boolean + image: + description: image is a container image location that contains the update. image should be used when the desired version does not exist in availableUpdates or history. When image is set, version is ignored. When image is set, version should be empty. When image is set, architecture cannot be specified. + type: string + version: + description: version is a semantic version identifying the update version. version is ignored if image is specified and required if architecture is specified. + type: string + x-kubernetes-validations: + - rule: 'has(self.architecture) && has(self.image) ? (self.architecture == '''' || self.image == '''') : true' + message: cannot set both Architecture and Image + - rule: 'has(self.architecture) && self.architecture != '''' ? self.version != '''' : true' + message: Version must be set if Architecture is set + overrides: + description: overrides is list of overides for components that are managed by cluster version operator. Marking a component unmanaged will prevent the operator from creating or updating the object. + type: array + items: + description: ComponentOverride allows overriding cluster version operator's behavior for a component. + type: object + required: + - group + - kind + - name + - namespace + - unmanaged + properties: + group: + description: group identifies the API group that the kind is in. + type: string + kind: + description: kind indentifies which object to override. + type: string + name: + description: name is the component's name. + type: string + namespace: + description: namespace is the component's namespace. If the resource is cluster scoped, the namespace should be empty. 
+ type: string + unmanaged: + description: 'unmanaged controls if cluster version operator should stop managing the resources in this cluster. Default: false' + type: boolean + x-kubernetes-list-map-keys: + - kind + - group + - namespace + - name + x-kubernetes-list-type: map + signatureStores: + description: "signatureStores contains the upstream URIs to verify release signatures and optional reference to a config map by name containing the PEM-encoded CA bundle. \n By default, CVO will use existing signature stores if this property is empty. The CVO will check the release signatures in the local ConfigMaps first. It will search for a valid signature in these stores in parallel only when local ConfigMaps did not include a valid signature. Validation will fail if none of the signature stores reply with valid signature before timeout. Setting signatureStores will replace the default signature stores with custom signature stores. Default stores can be used with custom signature stores by adding them manually. \n A maximum of 32 signature stores may be configured." + type: array + maxItems: 32 + items: + description: SignatureStore represents the URL of custom Signature Store + type: object + required: + - url + properties: + ca: + description: ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key "ca.crt" is used to locate the data. If specified and the config map or expected key is not found, the signature store is not honored. If the specified ca data is not valid, the signature store is not honored. If empty, we fall back to the CA configured via Proxy, which is appended to the default system roots. The namespace for this config map is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + url: + description: url contains the upstream custom signature store URL. url should be a valid absolute http/https URI of an upstream signature store as per rfc1738. This must be provided and cannot be empty. + type: string + x-kubernetes-validations: + - rule: isURL(self) + message: url must be a valid absolute URL + x-kubernetes-list-map-keys: + - url + x-kubernetes-list-type: map + upstream: + description: upstream may be used to specify the preferred update server. By default it will use the appropriate update server for the cluster and region. + type: string + status: + description: status contains information about the available updates and any in-progress updates. + type: object + required: + - availableUpdates + - desired + - observedGeneration + - versionHash + properties: + availableUpdates: + description: availableUpdates contains updates recommended for this cluster. Updates which appear in conditionalUpdates but not in availableUpdates may expose this cluster to known issues. This list may be empty if no updates are recommended, if the update service is unavailable, or if an invalid channel has been specified. + type: array + items: + description: Release represents an OpenShift release image and associated metadata. + type: object + properties: + channels: + description: channels is the set of Cincinnati channels to which the release currently belongs. + type: array + items: + type: string + x-kubernetes-list-type: set + image: + description: image is a container image location that contains the update. 
When this field is part of spec, image is optional if version is specified and the availableUpdates field contains a matching version. + type: string + url: + description: url contains information about this release. This URL is set by the 'url' metadata property on a release or the metadata returned by the update API and should be displayed as a link in user interfaces. The URL field may not be set for test or nightly releases. + type: string + version: + description: version is a semantic version identifying the update version. When this field is part of spec, version is optional if image is specified. + type: string + nullable: true + x-kubernetes-list-type: atomic + capabilities: + description: capabilities describes the state of optional, core cluster components. + type: object + properties: + enabledCapabilities: + description: enabledCapabilities lists all the capabilities that are currently managed. + type: array + items: + description: ClusterVersionCapability enumerates optional, core cluster components. + type: string + enum: + - openshift-samples + - baremetal + - marketplace + - Console + - Insights + - Storage + - CSISnapshot + - NodeTuning + - MachineAPI + - Build + - DeploymentConfig + - ImageRegistry + - OperatorLifecycleManager + - CloudCredential + x-kubernetes-list-type: atomic + knownCapabilities: + description: knownCapabilities lists all the capabilities known to the current cluster. + type: array + items: + description: ClusterVersionCapability enumerates optional, core cluster components. + type: string + enum: + - openshift-samples + - baremetal + - marketplace + - Console + - Insights + - Storage + - CSISnapshot + - NodeTuning + - MachineAPI + - Build + - DeploymentConfig + - ImageRegistry + - OperatorLifecycleManager + - CloudCredential + x-kubernetes-list-type: atomic + conditionalUpdates: + description: conditionalUpdates contains the list of updates that may be recommended for this cluster if it meets specific required conditions. Consumers interested in the set of updates that are actually recommended for this cluster should use availableUpdates. This list may be empty if no updates are recommended, if the update service is unavailable, or if an empty or invalid channel has been specified. + type: array + items: + description: ConditionalUpdate represents an update which is recommended to some clusters on the version the current cluster is reconciling, but which may not be recommended for the current cluster. + type: object + required: + - release + - risks + properties: + conditions: + description: 'conditions represents the observations of the conditional update''s current status. Known types are: * Evaluating, for whether the cluster-version operator will attempt to evaluate any risks[].matchingRules. * Recommended, for whether the update is recommended for the current cluster.' + type: array + items: + description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n type FooStatus struct{ // Represents the observations of a foo's current state. 
// Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + type: object + required: + - lastTransitionTime + - message + - reason + - status + - type + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + type: string + format: date-time + message: + description: message is a human readable message indicating details about the transition. This may be an empty string. + type: string + maxLength: 32768 + observedGeneration: + description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + type: integer + format: int64 + minimum: 0 + reason: + description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + type: string + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + status: + description: status of the condition, one of True, False, Unknown. + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + type: string + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + release: + description: release is the target of the update. + type: object + properties: + channels: + description: channels is the set of Cincinnati channels to which the release currently belongs. + type: array + items: + type: string + x-kubernetes-list-type: set + image: + description: image is a container image location that contains the update. When this field is part of spec, image is optional if version is specified and the availableUpdates field contains a matching version. + type: string + url: + description: url contains information about this release. This URL is set by the 'url' metadata property on a release or the metadata returned by the update API and should be displayed as a link in user interfaces. The URL field may not be set for test or nightly releases. + type: string + version: + description: version is a semantic version identifying the update version. When this field is part of spec, version is optional if image is specified. + type: string + risks: + description: risks represents the range of issues associated with updating to the target release. 
The cluster-version operator will evaluate all entries, and only recommend the update if there is at least one entry and all entries recommend the update. + type: array + minItems: 1 + items: + description: ConditionalUpdateRisk represents a reason and cluster-state for not recommending a conditional update. + type: object + required: + - matchingRules + - message + - name + - url + properties: + matchingRules: + description: matchingRules is a slice of conditions for deciding which clusters match the risk and which do not. The slice is ordered by decreasing precedence. The cluster-version operator will walk the slice in order, and stop after the first it can successfully evaluate. If no condition can be successfully evaluated, the update will not be recommended. + type: array + minItems: 1 + items: + description: ClusterCondition is a union of typed cluster conditions. The 'type' property determines which of the type-specific properties are relevant. When evaluated on a cluster, the condition may match, not match, or fail to evaluate. + type: object + required: + - type + properties: + promql: + description: promQL represents a cluster condition based on PromQL. + type: object + required: + - promql + properties: + promql: + description: PromQL is a PromQL query classifying clusters. This query query should return a 1 in the match case and a 0 in the does-not-match case. Queries which return no time series, or which return values besides 0 or 1, are evaluation failures. + type: string + type: + description: type represents the cluster-condition type. This defines the members and semantics of any additional properties. + type: string + enum: + - Always + - PromQL + x-kubernetes-list-type: atomic + message: + description: message provides additional information about the risk of updating, in the event that matchingRules match the cluster state. This is only to be consumed by humans. It may contain Line Feed characters (U+000A), which should be rendered as new lines. + type: string + minLength: 1 + name: + description: name is the CamelCase reason for not recommending a conditional update, in the event that matchingRules match the cluster state. + type: string + minLength: 1 + url: + description: url contains information about this risk. + type: string + format: uri + minLength: 1 + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + x-kubernetes-list-type: atomic + conditions: + description: conditions provides information about the cluster version. The condition "Available" is set to true if the desiredUpdate has been reached. The condition "Progressing" is set to true if an update is being applied. The condition "Degraded" is set to true if an update is currently blocked by a temporary or permanent error. Conditions are only valid for the current desiredUpdate when metadata.generation is equal to status.generation. + type: array + items: + description: ClusterOperatorStatusCondition represents the state of the operator's managed and monitored components. + type: object + required: + - lastTransitionTime + - status + - type + properties: + lastTransitionTime: + description: lastTransitionTime is the time of the last update to the current status property. + type: string + format: date-time + message: + description: message provides additional information about the current condition. This is only to be consumed by humans. It may contain Line Feed characters (U+000A), which should be rendered as new lines. 
+ type: string + reason: + description: reason is the CamelCase reason for the condition's current status. + type: string + status: + description: status of the condition, one of True, False, Unknown. + type: string + type: + description: type specifies the aspect reported by this condition. + type: string + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + desired: + description: desired is the version that the cluster is reconciling towards. If the cluster is not yet fully initialized desired will be set with the information available, which may be an image or a tag. + type: object + properties: + channels: + description: channels is the set of Cincinnati channels to which the release currently belongs. + type: array + items: + type: string + x-kubernetes-list-type: set + image: + description: image is a container image location that contains the update. When this field is part of spec, image is optional if version is specified and the availableUpdates field contains a matching version. + type: string + url: + description: url contains information about this release. This URL is set by the 'url' metadata property on a release or the metadata returned by the update API and should be displayed as a link in user interfaces. The URL field may not be set for test or nightly releases. + type: string + version: + description: version is a semantic version identifying the update version. When this field is part of spec, version is optional if image is specified. + type: string + history: + description: history contains a list of the most recent versions applied to the cluster. This value may be empty during cluster startup, and then will be updated when a new update is being applied. The newest update is first in the list and it is ordered by recency. Updates in the history have state Completed if the rollout completed - if an update was failing or halfway applied the state will be Partial. Only a limited amount of update history is preserved. + type: array + items: + description: UpdateHistory is a single attempted update to the cluster. + type: object + required: + - completionTime + - image + - startedTime + - state + - verified + properties: + acceptedRisks: + description: acceptedRisks records risks which were accepted to initiate the update. For example, it may menition an Upgradeable=False or missing signature that was overriden via desiredUpdate.force, or an update that was initiated despite not being in the availableUpdates set of recommended update targets. + type: string + completionTime: + description: completionTime, if set, is when the update was fully applied. The update that is currently being applied will have a null completion time. Completion time will always be set for entries that are not the current update (usually to the started time of the next update). + type: string + format: date-time + nullable: true + image: + description: image is a container image location that contains the update. This value is always populated. + type: string + startedTime: + description: startedTime is the time at which the update was started. + type: string + format: date-time + state: + description: state reflects whether the update was fully applied. The Partial state indicates the update is not fully applied, while the Completed state indicates the update was successfully rolled out at least once (all parts of the update successfully applied). 
+ type: string + verified: + description: verified indicates whether the provided update was properly verified before it was installed. If this is false the cluster may not be trusted. Verified does not cover upgradeable checks that depend on the cluster state at the time when the update target was accepted. + type: boolean + version: + description: version is a semantic version identifying the update version. If the requested image does not define a version, or if a failure occurs retrieving the image, this value may be empty. + type: string + x-kubernetes-list-type: atomic + observedGeneration: + description: observedGeneration reports which version of the spec is being synced. If this value is not equal to metadata.generation, then the desired and conditions fields may represent a previous version. + type: integer + format: int64 + versionHash: + description: versionHash is a fingerprint of the content that the cluster will be updated with. It is used by the operator to avoid unnecessary work and is for internal use only. + type: string + x-kubernetes-validations: + - rule: 'has(self.spec.capabilities) && has(self.spec.capabilities.additionalEnabledCapabilities) && self.spec.capabilities.baselineCapabilitySet == ''None'' && ''baremetal'' in self.spec.capabilities.additionalEnabledCapabilities ? ''MachineAPI'' in self.spec.capabilities.additionalEnabledCapabilities || (has(self.status) && has(self.status.capabilities) && has(self.status.capabilities.enabledCapabilities) && ''MachineAPI'' in self.status.capabilities.enabledCapabilities) : true' + message: the `baremetal` capability requires the `MachineAPI` capability, which is neither explicitly or implicitly enabled in this cluster, please enable the `MachineAPI` capability + - rule: 'has(self.spec.capabilities) && has(self.spec.capabilities.additionalEnabledCapabilities) && self.spec.capabilities.baselineCapabilitySet == ''None'' && ''marketplace'' in self.spec.capabilities.additionalEnabledCapabilities ? 
''OperatorLifecycleManager'' in self.spec.capabilities.additionalEnabledCapabilities || (has(self.status) && has(self.status.capabilities) && has(self.status.capabilities.enabledCapabilities) && ''OperatorLifecycleManager'' in self.status.capabilities.enabledCapabilities) : true' + message: the `marketplace` capability requires the `OperatorLifecycleManager` capability, which is neither explicitly or implicitly enabled in this cluster, please enable the `OperatorLifecycleManager` capability + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion-Default.crd.yaml similarity index 97% rename from vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion.crd.yaml rename to vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion-Default.crd.yaml index 7cf29c2a84..6d802f0ca6 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion-Default.crd.yaml @@ -5,6 +5,7 @@ metadata: api-approved.openshift.io: https://github.com/openshift/api/pull/495 include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + release.openshift.io/feature-set: Default name: clusterversions.config.openshift.io spec: group: config.openshift.io @@ -76,6 +77,7 @@ spec: - DeploymentConfig - ImageRegistry - OperatorLifecycleManager + - CloudCredential x-kubernetes-list-type: atomic baselineCapabilitySet: description: baselineCapabilitySet selects an initial set of optional capabilities to enable, which can be extended via additionalEnabledCapabilities. If unset, the cluster will choose a default, and the default may change over time. The current default is vCurrent. @@ -86,6 +88,7 @@ spec: - v4.12 - v4.13 - v4.14 + - v4.15 - vCurrent channel: description: channel is an identifier for explicitly requesting that a non-default set of updates be applied to this cluster. The default channel will be contain stable updates that are appropriate for production clusters. @@ -145,6 +148,12 @@ spec: unmanaged: description: 'unmanaged controls if cluster version operator should stop managing the resources in this cluster. Default: false' type: boolean + x-kubernetes-list-map-keys: + - kind + - group + - namespace + - name + x-kubernetes-list-type: map upstream: description: upstream may be used to specify the preferred update server. By default it will use the appropriate update server for the cluster and region. type: string @@ -169,6 +178,7 @@ spec: type: array items: type: string + x-kubernetes-list-type: set image: description: image is a container image location that contains the update. When this field is part of spec, image is optional if version is specified and the availableUpdates field contains a matching version. type: string @@ -179,6 +189,7 @@ spec: description: version is a semantic version identifying the update version. When this field is part of spec, version is optional if image is specified. type: string nullable: true + x-kubernetes-list-type: atomic capabilities: description: capabilities describes the state of optional, core cluster components. 
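As a hedged illustration of the capability-dependency validations above: with baselineCapabilitySet set to None, a spec that adds `baremetal` or `marketplace` is only accepted when the corresponding dependency is also enabled (explicitly, or already present in status). A hypothetical spec that satisfies both rules might look like:

    # Sketch only: baremetal requires MachineAPI, marketplace requires OperatorLifecycleManager.
    spec:
      capabilities:
        baselineCapabilitySet: None
        additionalEnabledCapabilities:
          - baremetal
          - MachineAPI
          - marketplace
          - OperatorLifecycleManager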
type: object @@ -203,6 +214,7 @@ spec: - DeploymentConfig - ImageRegistry - OperatorLifecycleManager + - CloudCredential x-kubernetes-list-type: atomic knownCapabilities: description: knownCapabilities lists all the capabilities known to the current cluster. @@ -224,6 +236,7 @@ spec: - DeploymentConfig - ImageRegistry - OperatorLifecycleManager + - CloudCredential x-kubernetes-list-type: atomic conditionalUpdates: description: conditionalUpdates contains the list of updates that may be recommended for this cluster if it meets specific required conditions. Consumers interested in the set of updates that are actually recommended for this cluster should use availableUpdates. This list may be empty if no updates are recommended, if the update service is unavailable, or if an empty or invalid channel has been specified. @@ -291,6 +304,7 @@ spec: type: array items: type: string + x-kubernetes-list-type: set image: description: image is a container image location that contains the update. When this field is part of spec, image is optional if version is specified and the availableUpdates field contains a matching version. type: string @@ -383,6 +397,9 @@ spec: type: description: type specifies the aspect reported by this condition. type: string + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map desired: description: desired is the version that the cluster is reconciling towards. If the cluster is not yet fully initialized desired will be set with the information available, which may be an image or a tag. type: object @@ -392,6 +409,7 @@ spec: type: array items: type: string + x-kubernetes-list-type: set image: description: image is a container image location that contains the update. When this field is part of spec, image is optional if version is specified and the availableUpdates field contains a matching version. type: string @@ -438,6 +456,7 @@ spec: version: description: version is a semantic version identifying the update version. If the requested image does not define a version, or if a failure occurs retrieving the image, this value may be empty. type: string + x-kubernetes-list-type: atomic observedGeneration: description: observedGeneration reports which version of the spec is being synced. If this value is not equal to metadata.generation, then the desired and conditions fields may represent a previous version. 
type: integer diff --git a/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion-TechPreviewNoUpgrade.crd.yaml new file mode 100644 index 0000000000..878a71fa6d --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion-TechPreviewNoUpgrade.crd.yaml @@ -0,0 +1,503 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/495 + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + release.openshift.io/feature-set: TechPreviewNoUpgrade + name: clusterversions.config.openshift.io +spec: + group: config.openshift.io + names: + kind: ClusterVersion + plural: clusterversions + singular: clusterversion + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.history[?(@.state=="Completed")].version + name: Version + type: string + - jsonPath: .status.conditions[?(@.type=="Available")].status + name: Available + type: string + - jsonPath: .status.conditions[?(@.type=="Progressing")].status + name: Progressing + type: string + - jsonPath: .status.conditions[?(@.type=="Progressing")].lastTransitionTime + name: Since + type: date + - jsonPath: .status.conditions[?(@.type=="Progressing")].message + name: Status + type: string + name: v1 + schema: + openAPIV3Schema: + description: "ClusterVersion is the configuration for the ClusterVersionOperator. This is where parameters related to automatic updates can be set. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec is the desired state of the cluster version - the operator will work to ensure that the desired version is applied to the cluster. + type: object + required: + - clusterID + properties: + capabilities: + description: capabilities configures the installation of optional, core cluster components. A null value here is identical to an empty object; see the child properties for default semantics. + type: object + properties: + additionalEnabledCapabilities: + description: additionalEnabledCapabilities extends the set of managed capabilities beyond the baseline defined in baselineCapabilitySet. The default is an empty set. + type: array + items: + description: ClusterVersionCapability enumerates optional, core cluster components. 
+ type: string + enum: + - openshift-samples + - baremetal + - marketplace + - Console + - Insights + - Storage + - CSISnapshot + - NodeTuning + - MachineAPI + - Build + - DeploymentConfig + - ImageRegistry + - OperatorLifecycleManager + - CloudCredential + x-kubernetes-list-type: atomic + baselineCapabilitySet: + description: baselineCapabilitySet selects an initial set of optional capabilities to enable, which can be extended via additionalEnabledCapabilities. If unset, the cluster will choose a default, and the default may change over time. The current default is vCurrent. + type: string + enum: + - None + - v4.11 + - v4.12 + - v4.13 + - v4.14 + - v4.15 + - vCurrent + channel: + description: channel is an identifier for explicitly requesting that a non-default set of updates be applied to this cluster. The default channel will be contain stable updates that are appropriate for production clusters. + type: string + clusterID: + description: clusterID uniquely identifies this cluster. This is expected to be an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx in hexadecimal values). This is a required field. + type: string + desiredUpdate: + description: "desiredUpdate is an optional field that indicates the desired value of the cluster version. Setting this value will trigger an upgrade (if the current version does not match the desired version). The set of recommended update values is listed as part of available updates in status, and setting values outside that range may cause the upgrade to fail. \n Some of the fields are inter-related with restrictions and meanings described here. 1. image is specified, version is specified, architecture is specified. API validation error. 2. image is specified, version is specified, architecture is not specified. You should not do this. version is silently ignored and image is used. 3. image is specified, version is not specified, architecture is specified. API validation error. 4. image is specified, version is not specified, architecture is not specified. image is used. 5. image is not specified, version is specified, architecture is specified. version and desired architecture are used to select an image. 6. image is not specified, version is specified, architecture is not specified. version and current architecture are used to select an image. 7. image is not specified, version is not specified, architecture is specified. API validation error. 8. image is not specified, version is not specified, architecture is not specified. API validation error. \n If an upgrade fails the operator will halt and report status about the failing component. Setting the desired update value back to the previous version will cause a rollback to be attempted. Not all rollbacks will succeed." + type: object + properties: + architecture: + description: architecture is an optional field that indicates the desired value of the cluster architecture. In this context cluster architecture means either a single architecture or a multi architecture. architecture can only be set to Multi thereby only allowing updates from single to multi architecture. If architecture is set, image cannot be set and version must be set. Valid values are 'Multi' and empty. + type: string + enum: + - Multi + - "" + force: + description: force allows an administrator to update to an image that has failed verification or upgradeable checks. 
This option should only be used when the authenticity of the provided image has been verified out of band because the provided image will run with full administrative access to the cluster. Do not use this flag with images that comes from unknown or potentially malicious sources. + type: boolean + image: + description: image is a container image location that contains the update. image should be used when the desired version does not exist in availableUpdates or history. When image is set, version is ignored. When image is set, version should be empty. When image is set, architecture cannot be specified. + type: string + version: + description: version is a semantic version identifying the update version. version is ignored if image is specified and required if architecture is specified. + type: string + x-kubernetes-validations: + - rule: 'has(self.architecture) && has(self.image) ? (self.architecture == '''' || self.image == '''') : true' + message: cannot set both Architecture and Image + - rule: 'has(self.architecture) && self.architecture != '''' ? self.version != '''' : true' + message: Version must be set if Architecture is set + overrides: + description: overrides is list of overides for components that are managed by cluster version operator. Marking a component unmanaged will prevent the operator from creating or updating the object. + type: array + items: + description: ComponentOverride allows overriding cluster version operator's behavior for a component. + type: object + required: + - group + - kind + - name + - namespace + - unmanaged + properties: + group: + description: group identifies the API group that the kind is in. + type: string + kind: + description: kind indentifies which object to override. + type: string + name: + description: name is the component's name. + type: string + namespace: + description: namespace is the component's namespace. If the resource is cluster scoped, the namespace should be empty. + type: string + unmanaged: + description: 'unmanaged controls if cluster version operator should stop managing the resources in this cluster. Default: false' + type: boolean + x-kubernetes-list-map-keys: + - kind + - group + - namespace + - name + x-kubernetes-list-type: map + signatureStores: + description: "signatureStores contains the upstream URIs to verify release signatures and optional reference to a config map by name containing the PEM-encoded CA bundle. \n By default, CVO will use existing signature stores if this property is empty. The CVO will check the release signatures in the local ConfigMaps first. It will search for a valid signature in these stores in parallel only when local ConfigMaps did not include a valid signature. Validation will fail if none of the signature stores reply with valid signature before timeout. Setting signatureStores will replace the default signature stores with custom signature stores. Default stores can be used with custom signature stores by adding them manually. \n A maximum of 32 signature stores may be configured." + type: array + maxItems: 32 + items: + description: SignatureStore represents the URL of custom Signature Store + type: object + required: + - url + properties: + ca: + description: ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key "ca.crt" is used to locate the data. If specified and the config map or expected key is not found, the signature store is not honored. 
If the specified ca data is not valid, the signature store is not honored. If empty, we fall back to the CA configured via Proxy, which is appended to the default system roots. The namespace for this config map is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + url: + description: url contains the upstream custom signature store URL. url should be a valid absolute http/https URI of an upstream signature store as per rfc1738. This must be provided and cannot be empty. + type: string + x-kubernetes-validations: + - rule: isURL(self) + message: url must be a valid absolute URL + x-kubernetes-list-map-keys: + - url + x-kubernetes-list-type: map + upstream: + description: upstream may be used to specify the preferred update server. By default it will use the appropriate update server for the cluster and region. + type: string + status: + description: status contains information about the available updates and any in-progress updates. + type: object + required: + - availableUpdates + - desired + - observedGeneration + - versionHash + properties: + availableUpdates: + description: availableUpdates contains updates recommended for this cluster. Updates which appear in conditionalUpdates but not in availableUpdates may expose this cluster to known issues. This list may be empty if no updates are recommended, if the update service is unavailable, or if an invalid channel has been specified. + type: array + items: + description: Release represents an OpenShift release image and associated metadata. + type: object + properties: + channels: + description: channels is the set of Cincinnati channels to which the release currently belongs. + type: array + items: + type: string + x-kubernetes-list-type: set + image: + description: image is a container image location that contains the update. When this field is part of spec, image is optional if version is specified and the availableUpdates field contains a matching version. + type: string + url: + description: url contains information about this release. This URL is set by the 'url' metadata property on a release or the metadata returned by the update API and should be displayed as a link in user interfaces. The URL field may not be set for test or nightly releases. + type: string + version: + description: version is a semantic version identifying the update version. When this field is part of spec, version is optional if image is specified. + type: string + nullable: true + x-kubernetes-list-type: atomic + capabilities: + description: capabilities describes the state of optional, core cluster components. + type: object + properties: + enabledCapabilities: + description: enabledCapabilities lists all the capabilities that are currently managed. + type: array + items: + description: ClusterVersionCapability enumerates optional, core cluster components. + type: string + enum: + - openshift-samples + - baremetal + - marketplace + - Console + - Insights + - Storage + - CSISnapshot + - NodeTuning + - MachineAPI + - Build + - DeploymentConfig + - ImageRegistry + - OperatorLifecycleManager + - CloudCredential + x-kubernetes-list-type: atomic + knownCapabilities: + description: knownCapabilities lists all the capabilities known to the current cluster. + type: array + items: + description: ClusterVersionCapability enumerates optional, core cluster components. 
+ type: string + enum: + - openshift-samples + - baremetal + - marketplace + - Console + - Insights + - Storage + - CSISnapshot + - NodeTuning + - MachineAPI + - Build + - DeploymentConfig + - ImageRegistry + - OperatorLifecycleManager + - CloudCredential + x-kubernetes-list-type: atomic + conditionalUpdates: + description: conditionalUpdates contains the list of updates that may be recommended for this cluster if it meets specific required conditions. Consumers interested in the set of updates that are actually recommended for this cluster should use availableUpdates. This list may be empty if no updates are recommended, if the update service is unavailable, or if an empty or invalid channel has been specified. + type: array + items: + description: ConditionalUpdate represents an update which is recommended to some clusters on the version the current cluster is reconciling, but which may not be recommended for the current cluster. + type: object + required: + - release + - risks + properties: + conditions: + description: 'conditions represents the observations of the conditional update''s current status. Known types are: * Evaluating, for whether the cluster-version operator will attempt to evaluate any risks[].matchingRules. * Recommended, for whether the update is recommended for the current cluster.' + type: array + items: + description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + type: object + required: + - lastTransitionTime + - message + - reason + - status + - type + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + type: string + format: date-time + message: + description: message is a human readable message indicating details about the transition. This may be an empty string. + type: string + maxLength: 32768 + observedGeneration: + description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + type: integer + format: int64 + minimum: 0 + reason: + description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + type: string + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + status: + description: status of the condition, one of True, False, Unknown. 
+ type: string + enum: + - "True" + - "False" + - Unknown + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + type: string + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + release: + description: release is the target of the update. + type: object + properties: + channels: + description: channels is the set of Cincinnati channels to which the release currently belongs. + type: array + items: + type: string + x-kubernetes-list-type: set + image: + description: image is a container image location that contains the update. When this field is part of spec, image is optional if version is specified and the availableUpdates field contains a matching version. + type: string + url: + description: url contains information about this release. This URL is set by the 'url' metadata property on a release or the metadata returned by the update API and should be displayed as a link in user interfaces. The URL field may not be set for test or nightly releases. + type: string + version: + description: version is a semantic version identifying the update version. When this field is part of spec, version is optional if image is specified. + type: string + risks: + description: risks represents the range of issues associated with updating to the target release. The cluster-version operator will evaluate all entries, and only recommend the update if there is at least one entry and all entries recommend the update. + type: array + minItems: 1 + items: + description: ConditionalUpdateRisk represents a reason and cluster-state for not recommending a conditional update. + type: object + required: + - matchingRules + - message + - name + - url + properties: + matchingRules: + description: matchingRules is a slice of conditions for deciding which clusters match the risk and which do not. The slice is ordered by decreasing precedence. The cluster-version operator will walk the slice in order, and stop after the first it can successfully evaluate. If no condition can be successfully evaluated, the update will not be recommended. + type: array + minItems: 1 + items: + description: ClusterCondition is a union of typed cluster conditions. The 'type' property determines which of the type-specific properties are relevant. When evaluated on a cluster, the condition may match, not match, or fail to evaluate. + type: object + required: + - type + properties: + promql: + description: promQL represents a cluster condition based on PromQL. + type: object + required: + - promql + properties: + promql: + description: PromQL is a PromQL query classifying clusters. This query query should return a 1 in the match case and a 0 in the does-not-match case. Queries which return no time series, or which return values besides 0 or 1, are evaluation failures. + type: string + type: + description: type represents the cluster-condition type. This defines the members and semantics of any additional properties. 
+ type: string + enum: + - Always + - PromQL + x-kubernetes-list-type: atomic + message: + description: message provides additional information about the risk of updating, in the event that matchingRules match the cluster state. This is only to be consumed by humans. It may contain Line Feed characters (U+000A), which should be rendered as new lines. + type: string + minLength: 1 + name: + description: name is the CamelCase reason for not recommending a conditional update, in the event that matchingRules match the cluster state. + type: string + minLength: 1 + url: + description: url contains information about this risk. + type: string + format: uri + minLength: 1 + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + x-kubernetes-list-type: atomic + conditions: + description: conditions provides information about the cluster version. The condition "Available" is set to true if the desiredUpdate has been reached. The condition "Progressing" is set to true if an update is being applied. The condition "Degraded" is set to true if an update is currently blocked by a temporary or permanent error. Conditions are only valid for the current desiredUpdate when metadata.generation is equal to status.generation. + type: array + items: + description: ClusterOperatorStatusCondition represents the state of the operator's managed and monitored components. + type: object + required: + - lastTransitionTime + - status + - type + properties: + lastTransitionTime: + description: lastTransitionTime is the time of the last update to the current status property. + type: string + format: date-time + message: + description: message provides additional information about the current condition. This is only to be consumed by humans. It may contain Line Feed characters (U+000A), which should be rendered as new lines. + type: string + reason: + description: reason is the CamelCase reason for the condition's current status. + type: string + status: + description: status of the condition, one of True, False, Unknown. + type: string + type: + description: type specifies the aspect reported by this condition. + type: string + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + desired: + description: desired is the version that the cluster is reconciling towards. If the cluster is not yet fully initialized desired will be set with the information available, which may be an image or a tag. + type: object + properties: + channels: + description: channels is the set of Cincinnati channels to which the release currently belongs. + type: array + items: + type: string + x-kubernetes-list-type: set + image: + description: image is a container image location that contains the update. When this field is part of spec, image is optional if version is specified and the availableUpdates field contains a matching version. + type: string + url: + description: url contains information about this release. This URL is set by the 'url' metadata property on a release or the metadata returned by the update API and should be displayed as a link in user interfaces. The URL field may not be set for test or nightly releases. + type: string + version: + description: version is a semantic version identifying the update version. When this field is part of spec, version is optional if image is specified. + type: string + history: + description: history contains a list of the most recent versions applied to the cluster. 
This value may be empty during cluster startup, and then will be updated when a new update is being applied. The newest update is first in the list and it is ordered by recency. Updates in the history have state Completed if the rollout completed - if an update was failing or halfway applied the state will be Partial. Only a limited amount of update history is preserved. + type: array + items: + description: UpdateHistory is a single attempted update to the cluster. + type: object + required: + - completionTime + - image + - startedTime + - state + - verified + properties: + acceptedRisks: + description: acceptedRisks records risks which were accepted to initiate the update. For example, it may menition an Upgradeable=False or missing signature that was overriden via desiredUpdate.force, or an update that was initiated despite not being in the availableUpdates set of recommended update targets. + type: string + completionTime: + description: completionTime, if set, is when the update was fully applied. The update that is currently being applied will have a null completion time. Completion time will always be set for entries that are not the current update (usually to the started time of the next update). + type: string + format: date-time + nullable: true + image: + description: image is a container image location that contains the update. This value is always populated. + type: string + startedTime: + description: startedTime is the time at which the update was started. + type: string + format: date-time + state: + description: state reflects whether the update was fully applied. The Partial state indicates the update is not fully applied, while the Completed state indicates the update was successfully rolled out at least once (all parts of the update successfully applied). + type: string + verified: + description: verified indicates whether the provided update was properly verified before it was installed. If this is false the cluster may not be trusted. Verified does not cover upgradeable checks that depend on the cluster state at the time when the update target was accepted. + type: boolean + version: + description: version is a semantic version identifying the update version. If the requested image does not define a version, or if a failure occurs retrieving the image, this value may be empty. + type: string + x-kubernetes-list-type: atomic + observedGeneration: + description: observedGeneration reports which version of the spec is being synced. If this value is not equal to metadata.generation, then the desired and conditions fields may represent a previous version. + type: integer + format: int64 + versionHash: + description: versionHash is a fingerprint of the content that the cluster will be updated with. It is used by the operator to avoid unnecessary work and is for internal use only. + type: string + x-kubernetes-validations: + - rule: 'has(self.spec.capabilities) && has(self.spec.capabilities.additionalEnabledCapabilities) && self.spec.capabilities.baselineCapabilitySet == ''None'' && ''baremetal'' in self.spec.capabilities.additionalEnabledCapabilities ? 
''MachineAPI'' in self.spec.capabilities.additionalEnabledCapabilities || (has(self.status) && has(self.status.capabilities) && has(self.status.capabilities.enabledCapabilities) && ''MachineAPI'' in self.status.capabilities.enabledCapabilities) : true' + message: the `baremetal` capability requires the `MachineAPI` capability, which is neither explicitly or implicitly enabled in this cluster, please enable the `MachineAPI` capability + - rule: 'has(self.spec.capabilities) && has(self.spec.capabilities.additionalEnabledCapabilities) && self.spec.capabilities.baselineCapabilitySet == ''None'' && ''marketplace'' in self.spec.capabilities.additionalEnabledCapabilities ? ''OperatorLifecycleManager'' in self.spec.capabilities.additionalEnabledCapabilities || (has(self.status) && has(self.status.capabilities) && has(self.status.capabilities.enabledCapabilities) && ''OperatorLifecycleManager'' in self.status.capabilities.enabledCapabilities) : true' + message: the `marketplace` capability requires the `OperatorLifecycleManager` capability, which is neither explicitly or implicitly enabled in this cluster, please enable the `OperatorLifecycleManager` capability + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver-CustomNoUpgrade.crd.yaml index 1895f9d33e..6bff43a781 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver-CustomNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver-CustomNoUpgrade.crd.yaml @@ -129,11 +129,11 @@ spec: description: name is the metadata.name of the referenced secret type: string tlsSecurityProfile: - description: "tlsSecurityProfile specifies settings for TLS connections for externally exposed servers. \n If unset, a default (which may change between releases) is chosen. Note that only Old, Intermediate and Custom profiles are currently supported, and the maximum available MinTLSVersions is VersionTLS12." + description: "tlsSecurityProfile specifies settings for TLS connections for externally exposed servers. \n If unset, a default (which may change between releases) is chosen. Note that only Old, Intermediate and Custom profiles are currently supported, and the maximum available minTLSVersion is VersionTLS12." type: object properties: custom: - description: "custom is a user-defined TLS security profile. Be extremely careful using a custom profile as invalid configurations can be catastrophic. An example custom profile looks like this: \n ciphers: - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 minTLSVersion: TLSv1.1" + description: "custom is a user-defined TLS security profile. Be extremely careful using a custom profile as invalid configurations can be catastrophic. An example custom profile looks like this: \n ciphers: - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 minTLSVersion: VersionTLS11" type: object properties: ciphers: @@ -142,7 +142,7 @@ spec: items: type: string minTLSVersion: - description: "minTLSVersion is used to specify the minimal version of the TLS protocol that is negotiated during the TLS handshake. 
For example, to use TLS versions 1.1, 1.2 and 1.3 (yaml): \n minTLSVersion: TLSv1.1 \n NOTE: currently the highest minTLSVersion allowed is VersionTLS12" + description: "minTLSVersion is used to specify the minimal version of the TLS protocol that is negotiated during the TLS handshake. For example, to use TLS versions 1.1, 1.2 and 1.3 (yaml): \n minTLSVersion: VersionTLS11 \n NOTE: currently the highest minTLSVersion allowed is VersionTLS12" type: string enum: - VersionTLS10 @@ -151,15 +151,15 @@ spec: - VersionTLS13 nullable: true intermediate: - description: "intermediate is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29 \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384 - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384 minTLSVersion: TLSv1.2" + description: "intermediate is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29 \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384 - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384 minTLSVersion: VersionTLS12" type: object nullable: true modern: - description: "modern is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 minTLSVersion: TLSv1.3 \n NOTE: Currently unsupported." + description: "modern is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 minTLSVersion: VersionTLS13 \n NOTE: Currently unsupported." 
type: object nullable: true old: - description: "old is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384 - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384 - DHE-RSA-CHACHA20-POLY1305 - ECDHE-ECDSA-AES128-SHA256 - ECDHE-RSA-AES128-SHA256 - ECDHE-ECDSA-AES128-SHA - ECDHE-RSA-AES128-SHA - ECDHE-ECDSA-AES256-SHA384 - ECDHE-RSA-AES256-SHA384 - ECDHE-ECDSA-AES256-SHA - ECDHE-RSA-AES256-SHA - DHE-RSA-AES128-SHA256 - DHE-RSA-AES256-SHA256 - AES128-GCM-SHA256 - AES256-GCM-SHA384 - AES128-SHA256 - AES256-SHA256 - AES128-SHA - AES256-SHA - DES-CBC3-SHA minTLSVersion: TLSv1.0" + description: "old is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384 - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384 - DHE-RSA-CHACHA20-POLY1305 - ECDHE-ECDSA-AES128-SHA256 - ECDHE-RSA-AES128-SHA256 - ECDHE-ECDSA-AES128-SHA - ECDHE-RSA-AES128-SHA - ECDHE-ECDSA-AES256-SHA384 - ECDHE-RSA-AES256-SHA384 - ECDHE-ECDSA-AES256-SHA - ECDHE-RSA-AES256-SHA - DHE-RSA-AES128-SHA256 - DHE-RSA-AES256-SHA256 - AES128-GCM-SHA256 - AES256-GCM-SHA384 - AES128-SHA256 - AES256-SHA256 - AES128-SHA - AES256-SHA - DES-CBC3-SHA minTLSVersion: VersionTLS10" type: object nullable: true type: diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver-Default.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver-Default.crd.yaml index 7edc7f23a7..bcf63f749c 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver-Default.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver-Default.crd.yaml @@ -129,11 +129,11 @@ spec: description: name is the metadata.name of the referenced secret type: string tlsSecurityProfile: - description: "tlsSecurityProfile specifies settings for TLS connections for externally exposed servers. \n If unset, a default (which may change between releases) is chosen. Note that only Old, Intermediate and Custom profiles are currently supported, and the maximum available MinTLSVersions is VersionTLS12." + description: "tlsSecurityProfile specifies settings for TLS connections for externally exposed servers. \n If unset, a default (which may change between releases) is chosen. Note that only Old, Intermediate and Custom profiles are currently supported, and the maximum available minTLSVersion is VersionTLS12." type: object properties: custom: - description: "custom is a user-defined TLS security profile. Be extremely careful using a custom profile as invalid configurations can be catastrophic. 
An example custom profile looks like this: \n ciphers: - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 minTLSVersion: TLSv1.1" + description: "custom is a user-defined TLS security profile. Be extremely careful using a custom profile as invalid configurations can be catastrophic. An example custom profile looks like this: \n ciphers: - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 minTLSVersion: VersionTLS11" type: object properties: ciphers: @@ -142,7 +142,7 @@ spec: items: type: string minTLSVersion: - description: "minTLSVersion is used to specify the minimal version of the TLS protocol that is negotiated during the TLS handshake. For example, to use TLS versions 1.1, 1.2 and 1.3 (yaml): \n minTLSVersion: TLSv1.1 \n NOTE: currently the highest minTLSVersion allowed is VersionTLS12" + description: "minTLSVersion is used to specify the minimal version of the TLS protocol that is negotiated during the TLS handshake. For example, to use TLS versions 1.1, 1.2 and 1.3 (yaml): \n minTLSVersion: VersionTLS11 \n NOTE: currently the highest minTLSVersion allowed is VersionTLS12" type: string enum: - VersionTLS10 @@ -151,15 +151,15 @@ spec: - VersionTLS13 nullable: true intermediate: - description: "intermediate is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29 \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384 - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384 minTLSVersion: TLSv1.2" + description: "intermediate is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29 \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384 - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384 minTLSVersion: VersionTLS12" type: object nullable: true modern: - description: "modern is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 minTLSVersion: TLSv1.3 \n NOTE: Currently unsupported." + description: "modern is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 minTLSVersion: VersionTLS13 \n NOTE: Currently unsupported." 
type: object nullable: true old: - description: "old is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384 - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384 - DHE-RSA-CHACHA20-POLY1305 - ECDHE-ECDSA-AES128-SHA256 - ECDHE-RSA-AES128-SHA256 - ECDHE-ECDSA-AES128-SHA - ECDHE-RSA-AES128-SHA - ECDHE-ECDSA-AES256-SHA384 - ECDHE-RSA-AES256-SHA384 - ECDHE-ECDSA-AES256-SHA - ECDHE-RSA-AES256-SHA - DHE-RSA-AES128-SHA256 - DHE-RSA-AES256-SHA256 - AES128-GCM-SHA256 - AES256-GCM-SHA384 - AES128-SHA256 - AES256-SHA256 - AES128-SHA - AES256-SHA - DES-CBC3-SHA minTLSVersion: TLSv1.0" + description: "old is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384 - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384 - DHE-RSA-CHACHA20-POLY1305 - ECDHE-ECDSA-AES128-SHA256 - ECDHE-RSA-AES128-SHA256 - ECDHE-ECDSA-AES128-SHA - ECDHE-RSA-AES128-SHA - ECDHE-ECDSA-AES256-SHA384 - ECDHE-RSA-AES256-SHA384 - ECDHE-ECDSA-AES256-SHA - ECDHE-RSA-AES256-SHA - DHE-RSA-AES128-SHA256 - DHE-RSA-AES256-SHA256 - AES128-GCM-SHA256 - AES256-GCM-SHA384 - AES128-SHA256 - AES256-SHA256 - AES128-SHA - AES256-SHA - DES-CBC3-SHA minTLSVersion: VersionTLS10" type: object nullable: true type: diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver-TechPreviewNoUpgrade.crd.yaml index 8ce5214c1d..e5adf12046 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver-TechPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver-TechPreviewNoUpgrade.crd.yaml @@ -129,11 +129,11 @@ spec: description: name is the metadata.name of the referenced secret type: string tlsSecurityProfile: - description: "tlsSecurityProfile specifies settings for TLS connections for externally exposed servers. \n If unset, a default (which may change between releases) is chosen. Note that only Old, Intermediate and Custom profiles are currently supported, and the maximum available MinTLSVersions is VersionTLS12." + description: "tlsSecurityProfile specifies settings for TLS connections for externally exposed servers. \n If unset, a default (which may change between releases) is chosen. Note that only Old, Intermediate and Custom profiles are currently supported, and the maximum available minTLSVersion is VersionTLS12." type: object properties: custom: - description: "custom is a user-defined TLS security profile. Be extremely careful using a custom profile as invalid configurations can be catastrophic. 
An example custom profile looks like this: \n ciphers: - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 minTLSVersion: TLSv1.1" + description: "custom is a user-defined TLS security profile. Be extremely careful using a custom profile as invalid configurations can be catastrophic. An example custom profile looks like this: \n ciphers: - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 minTLSVersion: VersionTLS11" type: object properties: ciphers: @@ -142,7 +142,7 @@ spec: items: type: string minTLSVersion: - description: "minTLSVersion is used to specify the minimal version of the TLS protocol that is negotiated during the TLS handshake. For example, to use TLS versions 1.1, 1.2 and 1.3 (yaml): \n minTLSVersion: TLSv1.1 \n NOTE: currently the highest minTLSVersion allowed is VersionTLS12" + description: "minTLSVersion is used to specify the minimal version of the TLS protocol that is negotiated during the TLS handshake. For example, to use TLS versions 1.1, 1.2 and 1.3 (yaml): \n minTLSVersion: VersionTLS11 \n NOTE: currently the highest minTLSVersion allowed is VersionTLS12" type: string enum: - VersionTLS10 @@ -151,15 +151,15 @@ spec: - VersionTLS13 nullable: true intermediate: - description: "intermediate is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29 \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384 - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384 minTLSVersion: TLSv1.2" + description: "intermediate is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29 \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384 - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384 minTLSVersion: VersionTLS12" type: object nullable: true modern: - description: "modern is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 minTLSVersion: TLSv1.3 \n NOTE: Currently unsupported." + description: "modern is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 minTLSVersion: VersionTLS13 \n NOTE: Currently unsupported." 
type: object nullable: true old: - description: "old is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384 - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384 - DHE-RSA-CHACHA20-POLY1305 - ECDHE-ECDSA-AES128-SHA256 - ECDHE-RSA-AES128-SHA256 - ECDHE-ECDSA-AES128-SHA - ECDHE-RSA-AES128-SHA - ECDHE-ECDSA-AES256-SHA384 - ECDHE-RSA-AES256-SHA384 - ECDHE-ECDSA-AES256-SHA - ECDHE-RSA-AES256-SHA - DHE-RSA-AES128-SHA256 - DHE-RSA-AES256-SHA256 - AES128-GCM-SHA256 - AES256-GCM-SHA384 - AES128-SHA256 - AES256-SHA256 - AES128-SHA - AES256-SHA - DES-CBC3-SHA minTLSVersion: TLSv1.0" + description: "old is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384 - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384 - DHE-RSA-CHACHA20-POLY1305 - ECDHE-ECDSA-AES128-SHA256 - ECDHE-RSA-AES128-SHA256 - ECDHE-ECDSA-AES128-SHA - ECDHE-RSA-AES128-SHA - ECDHE-ECDSA-AES256-SHA384 - ECDHE-RSA-AES256-SHA384 - ECDHE-ECDSA-AES256-SHA - ECDHE-RSA-AES256-SHA - DHE-RSA-AES128-SHA256 - DHE-RSA-AES256-SHA256 - AES128-GCM-SHA256 - AES256-GCM-SHA384 - AES128-SHA256 - AES256-SHA256 - AES128-SHA - AES256-SHA - DES-CBC3-SHA minTLSVersion: VersionTLS10" type: object nullable: true type: diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd-CustomNoUpgrade.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd-CustomNoUpgrade.yaml new file mode 100644 index 0000000000..597344ff0c --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd-CustomNoUpgrade.yaml @@ -0,0 +1,374 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + release.openshift.io/feature-set: CustomNoUpgrade + name: authentications.config.openshift.io +spec: + group: config.openshift.io + names: + kind: Authentication + listKind: AuthenticationList + plural: authentications + singular: authentication + scope: Cluster + versions: + - name: v1 + served: true + storage: true + subresources: + status: {} + "schema": + "openAPIV3Schema": + description: "Authentication specifies cluster-wide settings for authentication (like OAuth and webhook token authenticators). The canonical name of an instance is `cluster`. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + oauthMetadata: + description: 'oauthMetadata contains the discovery endpoint data for OAuth 2.0 Authorization Server Metadata for an external OAuth server. This discovery document can be viewed from its served location: oc get --raw ''/.well-known/oauth-authorization-server'' For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 If oauthMetadata.name is non-empty, this value has precedence over any metadata reference stored in status. The key "oauthMetadata" is used to locate the data. If specified and the config map or expected key is not found, no metadata is served. If the specified metadata is not valid, no metadata is served. The namespace for this config map is openshift-config.' + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + oidcProviders: + description: "OIDCProviders are OIDC identity providers that can issue tokens for this cluster Can only be set if \"Type\" is set to \"OIDC\". \n At most one provider can be configured." + type: array + maxItems: 1 + items: + type: object + required: + - issuer + - name + properties: + claimMappings: + description: ClaimMappings describes rules on how to transform information from an ID token into a cluster identity + type: object + properties: + groups: + description: Groups is a name of the claim that should be used to construct groups for the cluster identity. The referenced claim must use array of strings values. + type: object + required: + - claim + properties: + claim: + description: Claim is a JWT token claim to be used in the mapping + type: string + prefix: + description: "Prefix is a string to prefix the value from the token in the result of the claim mapping. \n By default, no prefixing occurs. \n Example: if `prefix` is set to \"myoidc:\"\" and the `claim` in JWT contains an array of strings \"a\", \"b\" and \"c\", the mapping will result in an array of string \"myoidc:a\", \"myoidc:b\" and \"myoidc:c\"." + type: string + username: + description: "Username is a name of the claim that should be used to construct usernames for the cluster identity. \n Default value: \"sub\"" + type: object + required: + - claim + properties: + claim: + description: Claim is a JWT token claim to be used in the mapping + type: string + prefix: + type: object + required: + - prefixString + properties: + prefixString: + type: string + minLength: 1 + prefixPolicy: + description: "PrefixPolicy specifies how a prefix should apply. \n By default, claims other than `email` will be prefixed with the issuer URL to prevent naming clashes with other plugins. \n Set to \"NoPrefix\" to disable prefixing. \n Example: (1) `prefix` is set to \"myoidc:\" and `claim` is set to \"username\". 
If the JWT claim `username` contains value `userA`, the resulting mapped value will be \"myoidc:userA\". (2) `prefix` is set to \"myoidc:\" and `claim` is set to \"email\". If the JWT `email` claim contains value \"userA@myoidc.tld\", the resulting mapped value will be \"myoidc:userA@myoidc.tld\". (3) `prefix` is unset, `issuerURL` is set to `https://myoidc.tld`, the JWT claims include \"username\":\"userA\" and \"email\":\"userA@myoidc.tld\", and `claim` is set to: (a) \"username\": the mapped value will be \"https://myoidc.tld#userA\" (b) \"email\": the mapped value will be \"userA@myoidc.tld\"" + type: string + enum: + - "" + - NoPrefix + - Prefix + x-kubernetes-validations: + - rule: 'has(self.prefixPolicy) && self.prefixPolicy == ''Prefix'' ? (has(self.prefix) && size(self.prefix.prefixString) > 0) : !has(self.prefix)' + message: prefix must be set if prefixPolicy is 'Prefix', but must remain unset otherwise + claimValidationRules: + description: ClaimValidationRules are rules that are applied to validate token claims to authenticate users. + type: array + items: + type: object + properties: + requiredClaim: + description: RequiredClaim allows configuring a required claim name and its expected value + type: object + required: + - claim + - requiredValue + properties: + claim: + description: Claim is a name of a required claim. Only claims with string values are supported. + type: string + minLength: 1 + requiredValue: + description: RequiredValue is the required value for the claim. + type: string + minLength: 1 + type: + description: Type sets the type of the validation rule + type: string + default: RequiredClaim + enum: + - RequiredClaim + x-kubernetes-list-type: atomic + issuer: + description: Issuer describes atributes of the OIDC token issuer + type: object + required: + - audiences + - issuerURL + properties: + audiences: + description: Audiences is an array of audiences that the token was issued for. Valid tokens must include at least one of these values in their "aud" claim. Must be set to exactly one value. + type: array + maxItems: 1 + items: + type: string + minLength: 1 + x-kubernetes-list-type: set + issuerCertificateAuthority: + description: CertificateAuthority is a reference to a config map in the configuration namespace. The .data of the configMap must contain the "ca-bundle.crt" key. If unset, system trust is used instead. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + issuerURL: + description: URL is the serving URL of the token issuer. Must use the https:// scheme. 
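To illustrate the OIDC provider fields introduced by this CustomNoUpgrade CRD, here is a hedged sketch of an Authentication resource; the issuer URL, audience, CA config map and provider name are placeholders, and per the descriptions above oidcProviders is only honored when spec.type is OIDC and the matching feature set is enabled:

apiVersion: config.openshift.io/v1
kind: Authentication
metadata:
  name: cluster
spec:
  type: OIDC                                  # oidcProviders may only be set when Type is OIDC
  oidcProviders:
    - name: my-oidc                           # placeholder provider name
      issuer:
        issuerURL: https://oidc.example.com   # must use the https:// scheme
        audiences:
          - openshift-example                 # exactly one audience, per the schema above
        issuerCertificateAuthority:
          name: oidc-ca-bundle                # config map carrying a ca-bundle.crt key (optional)
      claimMappings:
        username:
          claim: email
          prefixPolicy: NoPrefix              # prefix must stay unset unless prefixPolicy is Prefix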
+ type: string + pattern: ^https:\/\/[^\s] + name: + description: Name of the OIDC provider + type: string + minLength: 1 + oidcClients: + description: OIDCClients contains configuration for the platform's clients that need to request tokens from the issuer + type: array + maxItems: 20 + items: + type: object + required: + - clientID + - componentName + - componentNamespace + properties: + clientID: + description: ClientID is the identifier of the OIDC client from the OIDC provider + type: string + minLength: 1 + clientSecret: + description: ClientSecret refers to a secret in the `openshift-config` namespace that contains the client secret in the `clientSecret` key of the `.data` field + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + componentName: + description: ComponentName is the name of the component that is supposed to consume this client configuration + type: string + maxLength: 256 + minLength: 1 + componentNamespace: + description: ComponentNamespace is the namespace of the component that is supposed to consume this client configuration + type: string + maxLength: 63 + minLength: 1 + extraScopes: + description: ExtraScopes is an optional set of scopes to request tokens with. + type: array + items: + type: string + x-kubernetes-list-type: set + x-kubernetes-list-map-keys: + - componentNamespace + - componentName + x-kubernetes-list-type: map + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + serviceAccountIssuer: + description: 'serviceAccountIssuer is the identifier of the bound service account token issuer. The default is https://kubernetes.default.svc WARNING: Updating this field will not result in immediate invalidation of all bound tokens with the previous issuer value. Instead, the tokens issued by previous service account issuer will continue to be trusted for a time period chosen by the platform (currently set to 24h). This time period is subject to change over time. This allows internal components to transition to use new service account issuer without service distruption.' + type: string + type: + description: type identifies the cluster managed, user facing authentication mode in use. Specifically, it manages the component that responds to login attempts. The default is IntegratedOAuth. + type: string + webhookTokenAuthenticator: + description: "webhookTokenAuthenticator configures a remote token reviewer. These remote authentication webhooks can be used to verify bearer tokens via the tokenreviews.authentication.k8s.io REST API. This is required to honor bearer tokens that are provisioned by an external authentication service. \n Can only be set if \"Type\" is set to \"None\"." + type: object + required: + - kubeConfig + properties: + kubeConfig: + description: "kubeConfig references a secret that contains kube config file data which describes how to access the remote webhook service. The namespace for the referenced secret is openshift-config. \n For further details, see: \n https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication \n The key \"kubeConfig\" is used to locate the data. If the secret or expected key is not found, the webhook is not honored. If the specified kube config data is not valid, the webhook is not honored." 
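And a short sketch of the webhook variant described just above, assuming a secret (here called webhook-kubeconfig) already exists in openshift-config with the kubeconfig data under the kubeConfig key:

apiVersion: config.openshift.io/v1
kind: Authentication
metadata:
  name: cluster
spec:
  type: None                        # webhookTokenAuthenticator may only be set when Type is None
  webhookTokenAuthenticator:
    kubeConfig:
      name: webhook-kubeconfig      # secret in openshift-config holding the kube config file data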
+ type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + webhookTokenAuthenticators: + description: webhookTokenAuthenticators is DEPRECATED, setting it has no effect. + type: array + items: + description: deprecatedWebhookTokenAuthenticator holds the necessary configuration options for a remote token authenticator. It's the same as WebhookTokenAuthenticator but it's missing the 'required' validation on KubeConfig field. + type: object + properties: + kubeConfig: + description: 'kubeConfig contains kube config file data which describes how to access the remote webhook service. For further details, see: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication The key "kubeConfig" is used to locate the data. If the secret or expected key is not found, the webhook is not honored. If the specified kube config data is not valid, the webhook is not honored. The namespace for this secret is determined by the point of use.' + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + x-kubernetes-list-type: atomic + status: + description: status holds observed values from the cluster. They may not be overridden. + type: object + properties: + integratedOAuthMetadata: + description: 'integratedOAuthMetadata contains the discovery endpoint data for OAuth 2.0 Authorization Server Metadata for the in-cluster integrated OAuth server. This discovery document can be viewed from its served location: oc get --raw ''/.well-known/oauth-authorization-server'' For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 This contains the observed value based on cluster state. An explicitly set value in spec.oauthMetadata has precedence over this field. This field has no meaning if authentication spec.type is not set to IntegratedOAuth. The key "oauthMetadata" is used to locate the data. If the config map or expected key is not found, no metadata is served. If the specified metadata is not valid, no metadata is served. The namespace for this config map is openshift-config-managed.' + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + oidcClients: + description: OIDCClients is where participating operators place the current OIDC client status for OIDC clients that can be customized by the cluster-admin. + type: array + maxItems: 20 + items: + type: object + required: + - componentName + - componentNamespace + properties: + componentName: + description: ComponentName is the name of the component that will consume a client configuration. + type: string + maxLength: 256 + minLength: 1 + componentNamespace: + description: ComponentNamespace is the namespace of the component that will consume a client configuration. + type: string + maxLength: 63 + minLength: 1 + conditions: + description: "Conditions are used to communicate the state of the `oidcClients` entry. \n Supported conditions include Available, Degraded and Progressing. \n If Available is true, the component is successfully using the configured client. If Degraded is true, that means something has gone wrong trying to handle the client configuration. If Progressing is true, that means the component is taking some action related to the `oidcClients` entry." 
+ type: array + items: + description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + type: object + required: + - lastTransitionTime + - message + - reason + - status + - type + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + type: string + format: date-time + message: + description: message is a human readable message indicating details about the transition. This may be an empty string. + type: string + maxLength: 32768 + observedGeneration: + description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + type: integer + format: int64 + minimum: 0 + reason: + description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + type: string + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + status: + description: status of the condition, one of True, False, Unknown. + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + type: string + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + consumingUsers: + description: ConsumingUsers is a slice of ServiceAccounts that need to have read permission on the `clientSecret` secret. + type: array + maxItems: 5 + items: + description: ConsumingUser is an alias for string which we add validation to. Currently only service accounts are supported. + type: string + maxLength: 512 + minLength: 1 + pattern: ^system:serviceaccount:[a-z0-9]([-a-z0-9]*[a-z0-9])?:[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + x-kubernetes-list-type: set + currentOIDCClients: + description: CurrentOIDCClients is a list of clients that the component is currently using. 
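The status side is written by the participating operators rather than by the cluster admin; a hypothetical entry shaped by the fields above might look like this (component names, client IDs, reason and timestamp are made up):

status:
  oidcClients:
    - componentName: console              # operator consuming the client configuration
      componentNamespace: openshift-console
      currentOIDCClients:
        - clientID: openshift-console     # placeholder client ID
          issuerURL: https://oidc.example.com
          oidcProviderName: my-oidc       # matches spec.oidcProviders[].name
      conditions:
        - type: Available
          status: "True"
          reason: OIDCConfigApplied       # illustrative reason string
          message: client configuration is in use
          lastTransitionTime: "2023-12-07T00:00:00Z"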
+ type: array + items: + type: object + required: + - clientID + - issuerURL + - oidcProviderName + properties: + clientID: + description: ClientID is the identifier of the OIDC client from the OIDC provider + type: string + minLength: 1 + issuerURL: + description: URL is the serving URL of the token issuer. Must use the https:// scheme. + type: string + pattern: ^https:\/\/[^\s] + oidcProviderName: + description: OIDCName refers to the `name` of the provider from `oidcProviders` + type: string + minLength: 1 + x-kubernetes-list-map-keys: + - issuerURL + - clientID + x-kubernetes-list-type: map + x-kubernetes-list-map-keys: + - componentNamespace + - componentName + x-kubernetes-list-type: map + x-kubernetes-validations: + - rule: '!has(self.spec.oidcProviders) || self.spec.oidcProviders.all(p, !has(p.oidcClients) || p.oidcClients.all(specC, self.status.oidcClients.exists(statusC, statusC.componentNamespace == specC.componentNamespace && statusC.componentName == specC.componentName) || (has(oldSelf.spec.oidcProviders) && oldSelf.spec.oidcProviders.exists(oldP, oldP.name == p.name && has(oldP.oidcClients) && oldP.oidcClients.exists(oldC, oldC.componentNamespace == specC.componentNamespace && oldC.componentName == specC.componentName)))))' + message: all oidcClients in the oidcProviders must match their componentName and componentNamespace to either a previously configured oidcClient or they must exist in the status.oidcClients diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd-TechPreviewNoUpgrade.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd-TechPreviewNoUpgrade.yaml new file mode 100644 index 0000000000..d475dec957 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd-TechPreviewNoUpgrade.yaml @@ -0,0 +1,374 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + release.openshift.io/feature-set: TechPreviewNoUpgrade + name: authentications.config.openshift.io +spec: + group: config.openshift.io + names: + kind: Authentication + listKind: AuthenticationList + plural: authentications + singular: authentication + scope: Cluster + versions: + - name: v1 + served: true + storage: true + subresources: + status: {} + "schema": + "openAPIV3Schema": + description: "Authentication specifies cluster-wide settings for authentication (like OAuth and webhook token authenticators). The canonical name of an instance is `cluster`. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + oauthMetadata: + description: 'oauthMetadata contains the discovery endpoint data for OAuth 2.0 Authorization Server Metadata for an external OAuth server. This discovery document can be viewed from its served location: oc get --raw ''/.well-known/oauth-authorization-server'' For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 If oauthMetadata.name is non-empty, this value has precedence over any metadata reference stored in status. The key "oauthMetadata" is used to locate the data. If specified and the config map or expected key is not found, no metadata is served. If the specified metadata is not valid, no metadata is served. The namespace for this config map is openshift-config.' + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + oidcProviders: + description: "OIDCProviders are OIDC identity providers that can issue tokens for this cluster Can only be set if \"Type\" is set to \"OIDC\". \n At most one provider can be configured." + type: array + maxItems: 1 + items: + type: object + required: + - issuer + - name + properties: + claimMappings: + description: ClaimMappings describes rules on how to transform information from an ID token into a cluster identity + type: object + properties: + groups: + description: Groups is a name of the claim that should be used to construct groups for the cluster identity. The referenced claim must use array of strings values. + type: object + required: + - claim + properties: + claim: + description: Claim is a JWT token claim to be used in the mapping + type: string + prefix: + description: "Prefix is a string to prefix the value from the token in the result of the claim mapping. \n By default, no prefixing occurs. \n Example: if `prefix` is set to \"myoidc:\"\" and the `claim` in JWT contains an array of strings \"a\", \"b\" and \"c\", the mapping will result in an array of string \"myoidc:a\", \"myoidc:b\" and \"myoidc:c\"." + type: string + username: + description: "Username is a name of the claim that should be used to construct usernames for the cluster identity. \n Default value: \"sub\"" + type: object + required: + - claim + properties: + claim: + description: Claim is a JWT token claim to be used in the mapping + type: string + prefix: + type: object + required: + - prefixString + properties: + prefixString: + type: string + minLength: 1 + prefixPolicy: + description: "PrefixPolicy specifies how a prefix should apply. \n By default, claims other than `email` will be prefixed with the issuer URL to prevent naming clashes with other plugins. \n Set to \"NoPrefix\" to disable prefixing. \n Example: (1) `prefix` is set to \"myoidc:\" and `claim` is set to \"username\". If the JWT claim `username` contains value `userA`, the resulting mapped value will be \"myoidc:userA\". (2) `prefix` is set to \"myoidc:\" and `claim` is set to \"email\". If the JWT `email` claim contains value \"userA@myoidc.tld\", the resulting mapped value will be \"myoidc:userA@myoidc.tld\". 
(3) `prefix` is unset, `issuerURL` is set to `https://myoidc.tld`, the JWT claims include \"username\":\"userA\" and \"email\":\"userA@myoidc.tld\", and `claim` is set to: (a) \"username\": the mapped value will be \"https://myoidc.tld#userA\" (b) \"email\": the mapped value will be \"userA@myoidc.tld\"" + type: string + enum: + - "" + - NoPrefix + - Prefix + x-kubernetes-validations: + - rule: 'has(self.prefixPolicy) && self.prefixPolicy == ''Prefix'' ? (has(self.prefix) && size(self.prefix.prefixString) > 0) : !has(self.prefix)' + message: prefix must be set if prefixPolicy is 'Prefix', but must remain unset otherwise + claimValidationRules: + description: ClaimValidationRules are rules that are applied to validate token claims to authenticate users. + type: array + items: + type: object + properties: + requiredClaim: + description: RequiredClaim allows configuring a required claim name and its expected value + type: object + required: + - claim + - requiredValue + properties: + claim: + description: Claim is a name of a required claim. Only claims with string values are supported. + type: string + minLength: 1 + requiredValue: + description: RequiredValue is the required value for the claim. + type: string + minLength: 1 + type: + description: Type sets the type of the validation rule + type: string + default: RequiredClaim + enum: + - RequiredClaim + x-kubernetes-list-type: atomic + issuer: + description: Issuer describes atributes of the OIDC token issuer + type: object + required: + - audiences + - issuerURL + properties: + audiences: + description: Audiences is an array of audiences that the token was issued for. Valid tokens must include at least one of these values in their "aud" claim. Must be set to exactly one value. + type: array + maxItems: 1 + items: + type: string + minLength: 1 + x-kubernetes-list-type: set + issuerCertificateAuthority: + description: CertificateAuthority is a reference to a config map in the configuration namespace. The .data of the configMap must contain the "ca-bundle.crt" key. If unset, system trust is used instead. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + issuerURL: + description: URL is the serving URL of the token issuer. Must use the https:// scheme. 
+ type: string + pattern: ^https:\/\/[^\s] + name: + description: Name of the OIDC provider + type: string + minLength: 1 + oidcClients: + description: OIDCClients contains configuration for the platform's clients that need to request tokens from the issuer + type: array + maxItems: 20 + items: + type: object + required: + - clientID + - componentName + - componentNamespace + properties: + clientID: + description: ClientID is the identifier of the OIDC client from the OIDC provider + type: string + minLength: 1 + clientSecret: + description: ClientSecret refers to a secret in the `openshift-config` namespace that contains the client secret in the `clientSecret` key of the `.data` field + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + componentName: + description: ComponentName is the name of the component that is supposed to consume this client configuration + type: string + maxLength: 256 + minLength: 1 + componentNamespace: + description: ComponentNamespace is the namespace of the component that is supposed to consume this client configuration + type: string + maxLength: 63 + minLength: 1 + extraScopes: + description: ExtraScopes is an optional set of scopes to request tokens with. + type: array + items: + type: string + x-kubernetes-list-type: set + x-kubernetes-list-map-keys: + - componentNamespace + - componentName + x-kubernetes-list-type: map + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + serviceAccountIssuer: + description: 'serviceAccountIssuer is the identifier of the bound service account token issuer. The default is https://kubernetes.default.svc WARNING: Updating this field will not result in immediate invalidation of all bound tokens with the previous issuer value. Instead, the tokens issued by previous service account issuer will continue to be trusted for a time period chosen by the platform (currently set to 24h). This time period is subject to change over time. This allows internal components to transition to use new service account issuer without service distruption.' + type: string + type: + description: type identifies the cluster managed, user facing authentication mode in use. Specifically, it manages the component that responds to login attempts. The default is IntegratedOAuth. + type: string + webhookTokenAuthenticator: + description: "webhookTokenAuthenticator configures a remote token reviewer. These remote authentication webhooks can be used to verify bearer tokens via the tokenreviews.authentication.k8s.io REST API. This is required to honor bearer tokens that are provisioned by an external authentication service. \n Can only be set if \"Type\" is set to \"None\"." + type: object + required: + - kubeConfig + properties: + kubeConfig: + description: "kubeConfig references a secret that contains kube config file data which describes how to access the remote webhook service. The namespace for the referenced secret is openshift-config. \n For further details, see: \n https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication \n The key \"kubeConfig\" is used to locate the data. If the secret or expected key is not found, the webhook is not honored. If the specified kube config data is not valid, the webhook is not honored." 
+ type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + webhookTokenAuthenticators: + description: webhookTokenAuthenticators is DEPRECATED, setting it has no effect. + type: array + items: + description: deprecatedWebhookTokenAuthenticator holds the necessary configuration options for a remote token authenticator. It's the same as WebhookTokenAuthenticator but it's missing the 'required' validation on KubeConfig field. + type: object + properties: + kubeConfig: + description: 'kubeConfig contains kube config file data which describes how to access the remote webhook service. For further details, see: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication The key "kubeConfig" is used to locate the data. If the secret or expected key is not found, the webhook is not honored. If the specified kube config data is not valid, the webhook is not honored. The namespace for this secret is determined by the point of use.' + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + x-kubernetes-list-type: atomic + status: + description: status holds observed values from the cluster. They may not be overridden. + type: object + properties: + integratedOAuthMetadata: + description: 'integratedOAuthMetadata contains the discovery endpoint data for OAuth 2.0 Authorization Server Metadata for the in-cluster integrated OAuth server. This discovery document can be viewed from its served location: oc get --raw ''/.well-known/oauth-authorization-server'' For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 This contains the observed value based on cluster state. An explicitly set value in spec.oauthMetadata has precedence over this field. This field has no meaning if authentication spec.type is not set to IntegratedOAuth. The key "oauthMetadata" is used to locate the data. If the config map or expected key is not found, no metadata is served. If the specified metadata is not valid, no metadata is served. The namespace for this config map is openshift-config-managed.' + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + oidcClients: + description: OIDCClients is where participating operators place the current OIDC client status for OIDC clients that can be customized by the cluster-admin. + type: array + maxItems: 20 + items: + type: object + required: + - componentName + - componentNamespace + properties: + componentName: + description: ComponentName is the name of the component that will consume a client configuration. + type: string + maxLength: 256 + minLength: 1 + componentNamespace: + description: ComponentNamespace is the namespace of the component that will consume a client configuration. + type: string + maxLength: 63 + minLength: 1 + conditions: + description: "Conditions are used to communicate the state of the `oidcClients` entry. \n Supported conditions include Available, Degraded and Progressing. \n If Available is true, the component is successfully using the configured client. If Degraded is true, that means something has gone wrong trying to handle the client configuration. If Progressing is true, that means the component is taking some action related to the `oidcClients` entry." 
+ type: array + items: + description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + type: object + required: + - lastTransitionTime + - message + - reason + - status + - type + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + type: string + format: date-time + message: + description: message is a human readable message indicating details about the transition. This may be an empty string. + type: string + maxLength: 32768 + observedGeneration: + description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + type: integer + format: int64 + minimum: 0 + reason: + description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + type: string + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + status: + description: status of the condition, one of True, False, Unknown. + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + type: string + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + consumingUsers: + description: ConsumingUsers is a slice of ServiceAccounts that need to have read permission on the `clientSecret` secret. + type: array + maxItems: 5 + items: + description: ConsumingUser is an alias for string which we add validation to. Currently only service accounts are supported. + type: string + maxLength: 512 + minLength: 1 + pattern: ^system:serviceaccount:[a-z0-9]([-a-z0-9]*[a-z0-9])?:[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + x-kubernetes-list-type: set + currentOIDCClients: + description: CurrentOIDCClients is a list of clients that the component is currently using. 
+ type: array + items: + type: object + required: + - clientID + - issuerURL + - oidcProviderName + properties: + clientID: + description: ClientID is the identifier of the OIDC client from the OIDC provider + type: string + minLength: 1 + issuerURL: + description: URL is the serving URL of the token issuer. Must use the https:// scheme. + type: string + pattern: ^https:\/\/[^\s] + oidcProviderName: + description: OIDCName refers to the `name` of the provider from `oidcProviders` + type: string + minLength: 1 + x-kubernetes-list-map-keys: + - issuerURL + - clientID + x-kubernetes-list-type: map + x-kubernetes-list-map-keys: + - componentNamespace + - componentName + x-kubernetes-list-type: map + x-kubernetes-validations: + - rule: '!has(self.spec.oidcProviders) || self.spec.oidcProviders.all(p, !has(p.oidcClients) || p.oidcClients.all(specC, self.status.oidcClients.exists(statusC, statusC.componentNamespace == specC.componentNamespace && statusC.componentName == specC.componentName) || (has(oldSelf.spec.oidcProviders) && oldSelf.spec.oidcProviders.exists(oldP, oldP.name == p.name && has(oldP.oidcClients) && oldP.oidcClients.exists(oldC, oldC.componentNamespace == specC.componentNamespace && oldC.componentName == specC.componentName)))))' + message: all oidcClients in the oidcProviders must match their componentName and componentNamespace to either a previously configured oidcClient or they must exist in the status.oidcClients diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd.yaml index facf7c6b09..b0cd9e67fc 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd.yaml @@ -6,6 +6,7 @@ metadata: include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + release.openshift.io/feature-set: Default name: authentications.config.openshift.io spec: group: config.openshift.io @@ -52,7 +53,7 @@ spec: description: type identifies the cluster managed, user facing authentication mode in use. Specifically, it manages the component that responds to login attempts. The default is IntegratedOAuth. type: string webhookTokenAuthenticator: - description: webhookTokenAuthenticator configures a remote token reviewer. These remote authentication webhooks can be used to verify bearer tokens via the tokenreviews.authentication.k8s.io REST API. This is required to honor bearer tokens that are provisioned by an external authentication service. + description: "webhookTokenAuthenticator configures a remote token reviewer. These remote authentication webhooks can be used to verify bearer tokens via the tokenreviews.authentication.k8s.io REST API. This is required to honor bearer tokens that are provisioned by an external authentication service. \n Can only be set if \"Type\" is set to \"None\"." type: object required: - kubeConfig @@ -82,6 +83,7 @@ spec: name: description: name is the metadata.name of the referenced secret type: string + x-kubernetes-list-type: atomic status: description: status holds observed values from the cluster. They may not be overridden. 
type: object diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-CustomNoUpgrade.crd.yaml index 1b96b19c7e..5aece24ff9 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-CustomNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-CustomNoUpgrade.crd.yaml @@ -99,6 +99,80 @@ spec: nutanix: description: Nutanix contains settings specific to the Nutanix infrastructure provider. properties: + failureDomains: + description: failureDomains configures failure domains information for the Nutanix platform. When set, the failure domains defined here may be used to spread Machines across prism element clusters to improve fault tolerance of the cluster. + items: + description: NutanixFailureDomain configures failure domain information for the Nutanix platform. + properties: + cluster: + description: cluster is to identify the cluster (the Prism Element under management of the Prism Central), in which the Machine's VM will be created. The cluster identifier (uuid or name) can be obtained from the Prism Central console or using the prism_central API. + properties: + name: + description: name is the resource name in the PC. It cannot be empty if the type is Name. + type: string + type: + description: type is the identifier type to use for this resource. + enum: + - UUID + - Name + type: string + uuid: + description: uuid is the UUID of the resource in the PC. It cannot be empty if the type is UUID. + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: uuid configuration is required when type is UUID, and forbidden otherwise + rule: 'has(self.type) && self.type == ''UUID'' ? has(self.uuid) : !has(self.uuid)' + - message: name configuration is required when type is Name, and forbidden otherwise + rule: 'has(self.type) && self.type == ''Name'' ? has(self.name) : !has(self.name)' + name: + description: name defines the unique name of a failure domain. Name is required and must be at most 64 characters in length. It must consist of only lower case alphanumeric characters and hyphens (-). It must start and end with an alphanumeric character. This value is arbitrary and is used to identify the failure domain within the platform. + maxLength: 64 + minLength: 1 + pattern: '[a-z0-9]([-a-z0-9]*[a-z0-9])?' + type: string + subnets: + description: subnets holds a list of identifiers (one or more) of the cluster's network subnets for the Machine's VM to connect to. The subnet identifiers (uuid or name) can be obtained from the Prism Central console or using the prism_central API. + items: + description: NutanixResourceIdentifier holds the identity of a Nutanix PC resource (cluster, image, subnet, etc.) + properties: + name: + description: name is the resource name in the PC. It cannot be empty if the type is Name. + type: string + type: + description: type is the identifier type to use for this resource. + enum: + - UUID + - Name + type: string + uuid: + description: uuid is the UUID of the resource in the PC. It cannot be empty if the type is UUID. + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: uuid configuration is required when type is UUID, and forbidden otherwise + rule: 'has(self.type) && self.type == ''UUID'' ? 
has(self.uuid) : !has(self.uuid)' + - message: name configuration is required when type is Name, and forbidden otherwise + rule: 'has(self.type) && self.type == ''Name'' ? has(self.name) : !has(self.name)' + maxItems: 1 + minItems: 1 + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + required: + - cluster + - name + - subnets + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map prismCentral: description: prismCentral holds the endpoint address and port to access the Nutanix Prism Central. When a cluster-wide proxy is installed, by default, this endpoint will be accessed via the proxy. Should you wish for communication with this endpoint not to be proxied, please add the endpoint to the proxy spec.noProxy list. properties: @@ -268,6 +342,12 @@ spec: maxLength: 2048 pattern: ^/.*?/host/.*?/Resources.* type: string + template: + description: "template is the full inventory path of the virtual machine or template that will be cloned when creating new machines in this failure domain. The maximum length of the path is 2048 characters. \n When omitted, the template will be calculated by the control plane machineset operator based on the region and zone defined in VSpherePlatformFailureDomainSpec. For example, for zone=zonea, region=region1, and infrastructure name=test, the template path would be calculated as //vm/test-rhcos-region1-zonea." + maxLength: 2048 + minLength: 1 + pattern: ^/.*?/vm/.*? + type: string required: - computeCluster - datacenter @@ -640,6 +720,16 @@ spec: gcp: description: GCP contains settings specific to the Google Cloud Platform infrastructure provider. properties: + clusterHostedDNS: + default: Disabled + description: clusterHostedDNS indicates the type of DNS solution in use within the cluster. Its default value of "Disabled" indicates that the cluster's DNS is the default provided by the cloud platform. It can be "Enabled" during install to bypass the configuration of the cloud default DNS. When "Enabled", the cluster needs to provide a self-hosted DNS solution for the cluster's installation to succeed. The cluster's use of the cloud's Load Balancers is unaffected by this setting. The value is immutable after it has been set at install time. Currently, there is no way for the customer to add additional DNS entries into the cluster hosted DNS. Enabling this functionality allows the user to start their own DNS solution outside the cluster after installation is complete. The customer would be responsible for configuring this custom DNS solution, and it can be run in addition to the in-cluster DNS solution. + enum: + - Enabled + - Disabled + type: string + x-kubernetes-validations: + - message: clusterHostedDNS is immutable and may only be configured during installation + rule: self == oldSelf projectID: description: resourceGroupName is the Project ID for new GCP resources created for the cluster. type: string @@ -655,7 +745,7 @@ spec: description: key is the key part of the label. A label key can have a maximum of 63 characters and cannot be empty. Label key must begin with a lowercase letter, and must contain only lowercase letters, numeric characters, and the following special characters `_-`. Label key must not have the reserved prefixes `kubernetes-io` and `openshift-io`. 
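The tightened patterns in this hunk simply make the existing 63-character cap explicit in the regex; a label that satisfies them, placed in the gcp stanza shown in this CRD, could look like this (project ID and label values are placeholders):

gcp:
  projectID: my-gcp-project          # placeholder project ID
  clusterHostedDNS: Disabled         # default; DNS is provided by the cloud platform
  resourceLabels:
    - key: team_name                 # ^[a-z][0-9a-z_-]{0,62}$ : starts lowercase, at most 63 chars
      value: cluster_ops             # ^[0-9a-z_-]{1,63}$ : lowercase letters, digits, '_' and '-'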
maxLength: 63 minLength: 1 - pattern: ^[a-z][0-9a-z_-]+$ + pattern: ^[a-z][0-9a-z_-]{0,62}$ type: string x-kubernetes-validations: - message: label keys must not start with either `openshift-io` or `kubernetes-io` @@ -664,7 +754,7 @@ spec: description: value is the value part of the label. A label value can have a maximum of 63 characters and cannot be empty. Value must contain only lowercase letters, numeric characters, and the following special characters `_-`. maxLength: 63 minLength: 1 - pattern: ^[0-9a-z_-]+$ + pattern: ^[0-9a-z_-]{1,63}$ type: string required: - key @@ -744,9 +834,19 @@ spec: description: IBMCloudServiceEndpoint stores the configuration of a custom url to override existing defaults of IBM Cloud Services. properties: name: - description: name is the name of the IBM Cloud service. For example, the IBM Cloud Private IAM service could be configured with the service `name` of `IAM` and `url` of `https://private.iam.cloud.ibm.com` Whereas the IBM Cloud Private VPC service for US South (Dallas) could be configured with the service `name` of `VPC` and `url` of `https://us.south.private.iaas.cloud.ibm.com` - maxLength: 32 - pattern: ^[a-zA-Z0-9-]+$ + description: 'name is the name of the IBM Cloud service. Possible values are: CIS, COS, DNSServices, GlobalSearch, GlobalTagging, HyperProtect, IAM, KeyProtect, ResourceController, ResourceManager, or VPC. For example, the IBM Cloud Private IAM service could be configured with the service `name` of `IAM` and `url` of `https://private.iam.cloud.ibm.com` Whereas the IBM Cloud Private VPC service for US South (Dallas) could be configured with the service `name` of `VPC` and `url` of `https://us.south.private.iaas.cloud.ibm.com`' + enum: + - CIS + - COS + - DNSServices + - GlobalSearch + - GlobalTagging + - HyperProtect + - IAM + - KeyProtect + - ResourceController + - ResourceManager + - VPC type: string url: description: url is fully qualified URI with scheme https, that overrides the default generated endpoint for a client. This must be provided and cannot be empty. diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-Default.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-Default.crd.yaml index 8e58063098..8916d77b5b 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-Default.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-Default.crd.yaml @@ -99,6 +99,80 @@ spec: nutanix: description: Nutanix contains settings specific to the Nutanix infrastructure provider. properties: + failureDomains: + description: failureDomains configures failure domains information for the Nutanix platform. When set, the failure domains defined here may be used to spread Machines across prism element clusters to improve fault tolerance of the cluster. + items: + description: NutanixFailureDomain configures failure domain information for the Nutanix platform. + properties: + cluster: + description: cluster is to identify the cluster (the Prism Element under management of the Prism Central), in which the Machine's VM will be created. The cluster identifier (uuid or name) can be obtained from the Prism Central console or using the prism_central API. + properties: + name: + description: name is the resource name in the PC. It cannot be empty if the type is Name. + type: string + type: + description: type is the identifier type to use for this resource. 
+ enum: + - UUID + - Name + type: string + uuid: + description: uuid is the UUID of the resource in the PC. It cannot be empty if the type is UUID. + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: uuid configuration is required when type is UUID, and forbidden otherwise + rule: 'has(self.type) && self.type == ''UUID'' ? has(self.uuid) : !has(self.uuid)' + - message: name configuration is required when type is Name, and forbidden otherwise + rule: 'has(self.type) && self.type == ''Name'' ? has(self.name) : !has(self.name)' + name: + description: name defines the unique name of a failure domain. Name is required and must be at most 64 characters in length. It must consist of only lower case alphanumeric characters and hyphens (-). It must start and end with an alphanumeric character. This value is arbitrary and is used to identify the failure domain within the platform. + maxLength: 64 + minLength: 1 + pattern: '[a-z0-9]([-a-z0-9]*[a-z0-9])?' + type: string + subnets: + description: subnets holds a list of identifiers (one or more) of the cluster's network subnets for the Machine's VM to connect to. The subnet identifiers (uuid or name) can be obtained from the Prism Central console or using the prism_central API. + items: + description: NutanixResourceIdentifier holds the identity of a Nutanix PC resource (cluster, image, subnet, etc.) + properties: + name: + description: name is the resource name in the PC. It cannot be empty if the type is Name. + type: string + type: + description: type is the identifier type to use for this resource. + enum: + - UUID + - Name + type: string + uuid: + description: uuid is the UUID of the resource in the PC. It cannot be empty if the type is UUID. + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: uuid configuration is required when type is UUID, and forbidden otherwise + rule: 'has(self.type) && self.type == ''UUID'' ? has(self.uuid) : !has(self.uuid)' + - message: name configuration is required when type is Name, and forbidden otherwise + rule: 'has(self.type) && self.type == ''Name'' ? has(self.name) : !has(self.name)' + maxItems: 1 + minItems: 1 + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + required: + - cluster + - name + - subnets + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map prismCentral: description: prismCentral holds the endpoint address and port to access the Nutanix Prism Central. When a cluster-wide proxy is installed, by default, this endpoint will be accessed via the proxy. Should you wish for communication with this endpoint not to be proxied, please add the endpoint to the proxy spec.noProxy list. properties: @@ -655,9 +729,19 @@ spec: description: IBMCloudServiceEndpoint stores the configuration of a custom url to override existing defaults of IBM Cloud Services. properties: name: - description: name is the name of the IBM Cloud service. For example, the IBM Cloud Private IAM service could be configured with the service `name` of `IAM` and `url` of `https://private.iam.cloud.ibm.com` Whereas the IBM Cloud Private VPC service for US South (Dallas) could be configured with the service `name` of `VPC` and `url` of `https://us.south.private.iaas.cloud.ibm.com` - maxLength: 32 - pattern: ^[a-zA-Z0-9-]+$ + description: 'name is the name of the IBM Cloud service. 
Possible values are: CIS, COS, DNSServices, GlobalSearch, GlobalTagging, HyperProtect, IAM, KeyProtect, ResourceController, ResourceManager, or VPC. For example, the IBM Cloud Private IAM service could be configured with the service `name` of `IAM` and `url` of `https://private.iam.cloud.ibm.com` Whereas the IBM Cloud Private VPC service for US South (Dallas) could be configured with the service `name` of `VPC` and `url` of `https://us.south.private.iaas.cloud.ibm.com`' + enum: + - CIS + - COS + - DNSServices + - GlobalSearch + - GlobalTagging + - HyperProtect + - IAM + - KeyProtect + - ResourceController + - ResourceManager + - VPC type: string url: description: url is fully qualified URI with scheme https, that overrides the default generated endpoint for a client. This must be provided and cannot be empty. diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-TechPreviewNoUpgrade.crd.yaml index 1b84d0ae6f..f9e91cca4e 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-TechPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-TechPreviewNoUpgrade.crd.yaml @@ -99,6 +99,80 @@ spec: nutanix: description: Nutanix contains settings specific to the Nutanix infrastructure provider. properties: + failureDomains: + description: failureDomains configures failure domains information for the Nutanix platform. When set, the failure domains defined here may be used to spread Machines across prism element clusters to improve fault tolerance of the cluster. + items: + description: NutanixFailureDomain configures failure domain information for the Nutanix platform. + properties: + cluster: + description: cluster is to identify the cluster (the Prism Element under management of the Prism Central), in which the Machine's VM will be created. The cluster identifier (uuid or name) can be obtained from the Prism Central console or using the prism_central API. + properties: + name: + description: name is the resource name in the PC. It cannot be empty if the type is Name. + type: string + type: + description: type is the identifier type to use for this resource. + enum: + - UUID + - Name + type: string + uuid: + description: uuid is the UUID of the resource in the PC. It cannot be empty if the type is UUID. + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: uuid configuration is required when type is UUID, and forbidden otherwise + rule: 'has(self.type) && self.type == ''UUID'' ? has(self.uuid) : !has(self.uuid)' + - message: name configuration is required when type is Name, and forbidden otherwise + rule: 'has(self.type) && self.type == ''Name'' ? has(self.name) : !has(self.name)' + name: + description: name defines the unique name of a failure domain. Name is required and must be at most 64 characters in length. It must consist of only lower case alphanumeric characters and hyphens (-). It must start and end with an alphanumeric character. This value is arbitrary and is used to identify the failure domain within the platform. + maxLength: 64 + minLength: 1 + pattern: '[a-z0-9]([-a-z0-9]*[a-z0-9])?' + type: string + subnets: + description: subnets holds a list of identifiers (one or more) of the cluster's network subnets for the Machine's VM to connect to. 
The subnet identifiers (uuid or name) can be obtained from the Prism Central console or using the prism_central API. + items: + description: NutanixResourceIdentifier holds the identity of a Nutanix PC resource (cluster, image, subnet, etc.) + properties: + name: + description: name is the resource name in the PC. It cannot be empty if the type is Name. + type: string + type: + description: type is the identifier type to use for this resource. + enum: + - UUID + - Name + type: string + uuid: + description: uuid is the UUID of the resource in the PC. It cannot be empty if the type is UUID. + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: uuid configuration is required when type is UUID, and forbidden otherwise + rule: 'has(self.type) && self.type == ''UUID'' ? has(self.uuid) : !has(self.uuid)' + - message: name configuration is required when type is Name, and forbidden otherwise + rule: 'has(self.type) && self.type == ''Name'' ? has(self.name) : !has(self.name)' + maxItems: 1 + minItems: 1 + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + required: + - cluster + - name + - subnets + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map prismCentral: description: prismCentral holds the endpoint address and port to access the Nutanix Prism Central. When a cluster-wide proxy is installed, by default, this endpoint will be accessed via the proxy. Should you wish for communication with this endpoint not to be proxied, please add the endpoint to the proxy spec.noProxy list. properties: @@ -268,6 +342,12 @@ spec: maxLength: 2048 pattern: ^/.*?/host/.*?/Resources.* type: string + template: + description: "template is the full inventory path of the virtual machine or template that will be cloned when creating new machines in this failure domain. The maximum length of the path is 2048 characters. \n When omitted, the template will be calculated by the control plane machineset operator based on the region and zone defined in VSpherePlatformFailureDomainSpec. For example, for zone=zonea, region=region1, and infrastructure name=test, the template path would be calculated as //vm/test-rhcos-region1-zonea." + maxLength: 2048 + minLength: 1 + pattern: ^/.*?/vm/.*? + type: string required: - computeCluster - datacenter @@ -640,6 +720,16 @@ spec: gcp: description: GCP contains settings specific to the Google Cloud Platform infrastructure provider. properties: + clusterHostedDNS: + default: Disabled + description: clusterHostedDNS indicates the type of DNS solution in use within the cluster. Its default value of "Disabled" indicates that the cluster's DNS is the default provided by the cloud platform. It can be "Enabled" during install to bypass the configuration of the cloud default DNS. When "Enabled", the cluster needs to provide a self-hosted DNS solution for the cluster's installation to succeed. The cluster's use of the cloud's Load Balancers is unaffected by this setting. The value is immutable after it has been set at install time. Currently, there is no way for the customer to add additional DNS entries into the cluster hosted DNS. Enabling this functionality allows the user to start their own DNS solution outside the cluster after installation is complete. The customer would be responsible for configuring this custom DNS solution, and it can be run in addition to the in-cluster DNS solution. 
+ enum: + - Enabled + - Disabled + type: string + x-kubernetes-validations: + - message: clusterHostedDNS is immutable and may only be configured during installation + rule: self == oldSelf projectID: description: resourceGroupName is the Project ID for new GCP resources created for the cluster. type: string @@ -655,7 +745,7 @@ spec: description: key is the key part of the label. A label key can have a maximum of 63 characters and cannot be empty. Label key must begin with a lowercase letter, and must contain only lowercase letters, numeric characters, and the following special characters `_-`. Label key must not have the reserved prefixes `kubernetes-io` and `openshift-io`. maxLength: 63 minLength: 1 - pattern: ^[a-z][0-9a-z_-]+$ + pattern: ^[a-z][0-9a-z_-]{0,62}$ type: string x-kubernetes-validations: - message: label keys must not start with either `openshift-io` or `kubernetes-io` @@ -664,7 +754,7 @@ spec: description: value is the value part of the label. A label value can have a maximum of 63 characters and cannot be empty. Value must contain only lowercase letters, numeric characters, and the following special characters `_-`. maxLength: 63 minLength: 1 - pattern: ^[0-9a-z_-]+$ + pattern: ^[0-9a-z_-]{1,63}$ type: string required: - key @@ -744,9 +834,19 @@ spec: description: IBMCloudServiceEndpoint stores the configuration of a custom url to override existing defaults of IBM Cloud Services. properties: name: - description: name is the name of the IBM Cloud service. For example, the IBM Cloud Private IAM service could be configured with the service `name` of `IAM` and `url` of `https://private.iam.cloud.ibm.com` Whereas the IBM Cloud Private VPC service for US South (Dallas) could be configured with the service `name` of `VPC` and `url` of `https://us.south.private.iaas.cloud.ibm.com` - maxLength: 32 - pattern: ^[a-zA-Z0-9-]+$ + description: 'name is the name of the IBM Cloud service. Possible values are: CIS, COS, DNSServices, GlobalSearch, GlobalTagging, HyperProtect, IAM, KeyProtect, ResourceController, ResourceManager, or VPC. For example, the IBM Cloud Private IAM service could be configured with the service `name` of `IAM` and `url` of `https://private.iam.cloud.ibm.com` Whereas the IBM Cloud Private VPC service for US South (Dallas) could be configured with the service `name` of `VPC` and `url` of `https://us.south.private.iaas.cloud.ibm.com`' + enum: + - CIS + - COS + - DNSServices + - GlobalSearch + - GlobalTagging + - HyperProtect + - IAM + - KeyProtect + - ResourceController + - ResourceManager + - VPC type: string url: description: url is fully qualified URI with scheme https, that overrides the default generated endpoint for a client. This must be provided and cannot be empty. 
diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network-CustomNoUpgrade.crd.yaml new file mode 100644 index 0000000000..8ae50ae791 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network-CustomNoUpgrade.crd.yaml @@ -0,0 +1,211 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + release.openshift.io/feature-set: CustomNoUpgrade + name: networks.config.openshift.io +spec: + group: config.openshift.io + names: + kind: Network + listKind: NetworkList + plural: networks + singular: network + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "Network holds cluster-wide information about Network. The canonical name is `cluster`. It is used to configure the desired network configuration, such as: IP address pools for services/pod IPs, network plugin, etc. Please view network.spec for an explanation on what applies when configuring this resource. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration. As a general rule, this SHOULD NOT be read directly. Instead, you should consume the NetworkStatus, as it indicates the currently deployed configuration. Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each. + type: object + properties: + clusterNetwork: + description: IP address pool to use for pod IPs. This field is immutable after installation. + type: array + items: + description: ClusterNetworkEntry is a contiguous block of IP addresses from which pod IPs are allocated. + type: object + properties: + cidr: + description: The complete block for pod IPs. + type: string + hostPrefix: + description: The size (prefix) of block to allocate to each node. If this field is not used by the plugin, it can be left unset. + type: integer + format: int32 + minimum: 0 + externalIP: + description: externalIP defines configuration for controllers that affect Service.ExternalIP. If nil, then ExternalIP is not allowed to be set. + type: object + properties: + autoAssignCIDRs: + description: autoAssignCIDRs is a list of CIDRs from which to automatically assign Service.ExternalIP. 
These are assigned when the service is of type LoadBalancer. In general, this is only useful for bare-metal clusters. In Openshift 3.x, this was misleadingly called "IngressIPs". Automatically assigned External IPs are not affected by any ExternalIPPolicy rules. Currently, only one entry may be provided. + type: array + items: + type: string + policy: + description: policy is a set of restrictions applied to the ExternalIP field. If nil or empty, then ExternalIP is not allowed to be set. + type: object + properties: + allowedCIDRs: + description: allowedCIDRs is the list of allowed CIDRs. + type: array + items: + type: string + rejectedCIDRs: + description: rejectedCIDRs is the list of disallowed CIDRs. These take precedence over allowedCIDRs. + type: array + items: + type: string + networkType: + description: 'NetworkType is the plugin that is to be deployed (e.g. OpenShiftSDN). This should match a value that the cluster-network-operator understands, or else no networking will be installed. Currently supported values are: - OpenShiftSDN This field is immutable after installation.' + type: string + serviceNetwork: + description: IP address pool for services. Currently, we only support a single entry here. This field is immutable after installation. + type: array + items: + type: string + serviceNodePortRange: + description: The port range allowed for Services of type NodePort. If not specified, the default of 30000-32767 will be used. Such Services without a NodePort specified will have one automatically allocated from this range. This parameter can be updated after the cluster is installed. + type: string + pattern: ^([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])-([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$ + status: + description: status holds observed values from the cluster. They may not be overridden. + type: object + properties: + clusterNetwork: + description: IP address pool to use for pod IPs. + type: array + items: + description: ClusterNetworkEntry is a contiguous block of IP addresses from which pod IPs are allocated. + type: object + properties: + cidr: + description: The complete block for pod IPs. + type: string + hostPrefix: + description: The size (prefix) of block to allocate to each node. If this field is not used by the plugin, it can be left unset. + type: integer + format: int32 + minimum: 0 + clusterNetworkMTU: + description: ClusterNetworkMTU is the MTU for inter-pod networking. + type: integer + conditions: + description: 'conditions represents the observations of a network.config current state. Known .status.conditions.type are: "NetworkTypeMigrationInProgress", "NetworkTypeMigrationMTUReady", "NetworkTypeMigrationTargetCNIAvailable", "NetworkTypeMigrationTargetCNIInUse" and "NetworkTypeMigrationOriginalCNIPurged"' + type: array + items: + description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n type FooStatus struct{ // Represents the observations of a foo's current state. 
// Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + type: object + required: + - lastTransitionTime + - message + - reason + - status + - type + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + type: string + format: date-time + message: + description: message is a human readable message indicating details about the transition. This may be an empty string. + type: string + maxLength: 32768 + observedGeneration: + description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + type: integer + format: int64 + minimum: 0 + reason: + description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + type: string + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + status: + description: status of the condition, one of True, False, Unknown. + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + type: string + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + migration: + description: Migration contains the cluster network migration configuration. + type: object + properties: + mtu: + description: MTU contains the MTU migration configuration. + type: object + properties: + machine: + description: Machine contains MTU migration configuration for the machine's uplink. + type: object + properties: + from: + description: From is the MTU to migrate from. + type: integer + format: int32 + minimum: 0 + to: + description: To is the MTU to migrate to. + type: integer + format: int32 + minimum: 0 + network: + description: Network contains MTU migration configuration for the default network. + type: object + properties: + from: + description: From is the MTU to migrate from. + type: integer + format: int32 + minimum: 0 + to: + description: To is the MTU to migrate to. + type: integer + format: int32 + minimum: 0 + networkType: + description: 'NetworkType is the target plugin that is to be deployed. 
Currently supported values are: OpenShiftSDN, OVNKubernetes' + type: string + enum: + - OpenShiftSDN + - OVNKubernetes + networkType: + description: NetworkType is the plugin that is deployed (e.g. OpenShiftSDN). + type: string + serviceNetwork: + description: IP address pool for services. Currently, we only support a single entry here. + type: array + items: + type: string + served: true + storage: true diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network-Default.crd.yaml similarity index 99% rename from vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network.crd.yaml rename to vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network-Default.crd.yaml index c011785061..4582bf8fdf 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network-Default.crd.yaml @@ -6,6 +6,7 @@ metadata: include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + release.openshift.io/feature-set: Default name: networks.config.openshift.io spec: group: config.openshift.io diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network-TechPreviewNoUpgrade.crd.yaml new file mode 100644 index 0000000000..e4981c183a --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network-TechPreviewNoUpgrade.crd.yaml @@ -0,0 +1,211 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + release.openshift.io/feature-set: TechPreviewNoUpgrade + name: networks.config.openshift.io +spec: + group: config.openshift.io + names: + kind: Network + listKind: NetworkList + plural: networks + singular: network + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "Network holds cluster-wide information about Network. The canonical name is `cluster`. It is used to configure the desired network configuration, such as: IP address pools for services/pod IPs, network plugin, etc. Please view network.spec for an explanation on what applies when configuring this resource. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration. As a general rule, this SHOULD NOT be read directly. Instead, you should consume the NetworkStatus, as it indicates the currently deployed configuration. Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each. + type: object + properties: + clusterNetwork: + description: IP address pool to use for pod IPs. This field is immutable after installation. + type: array + items: + description: ClusterNetworkEntry is a contiguous block of IP addresses from which pod IPs are allocated. + type: object + properties: + cidr: + description: The complete block for pod IPs. + type: string + hostPrefix: + description: The size (prefix) of block to allocate to each node. If this field is not used by the plugin, it can be left unset. + type: integer + format: int32 + minimum: 0 + externalIP: + description: externalIP defines configuration for controllers that affect Service.ExternalIP. If nil, then ExternalIP is not allowed to be set. + type: object + properties: + autoAssignCIDRs: + description: autoAssignCIDRs is a list of CIDRs from which to automatically assign Service.ExternalIP. These are assigned when the service is of type LoadBalancer. In general, this is only useful for bare-metal clusters. In Openshift 3.x, this was misleadingly called "IngressIPs". Automatically assigned External IPs are not affected by any ExternalIPPolicy rules. Currently, only one entry may be provided. + type: array + items: + type: string + policy: + description: policy is a set of restrictions applied to the ExternalIP field. If nil or empty, then ExternalIP is not allowed to be set. + type: object + properties: + allowedCIDRs: + description: allowedCIDRs is the list of allowed CIDRs. + type: array + items: + type: string + rejectedCIDRs: + description: rejectedCIDRs is the list of disallowed CIDRs. These take precedence over allowedCIDRs. + type: array + items: + type: string + networkType: + description: 'NetworkType is the plugin that is to be deployed (e.g. OpenShiftSDN). This should match a value that the cluster-network-operator understands, or else no networking will be installed. Currently supported values are: - OpenShiftSDN This field is immutable after installation.' + type: string + serviceNetwork: + description: IP address pool for services. Currently, we only support a single entry here. This field is immutable after installation. + type: array + items: + type: string + serviceNodePortRange: + description: The port range allowed for Services of type NodePort. If not specified, the default of 30000-32767 will be used. Such Services without a NodePort specified will have one automatically allocated from this range. This parameter can be updated after the cluster is installed. + type: string + pattern: ^([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])-([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$ + status: + description: status holds observed values from the cluster. They may not be overridden. + type: object + properties: + clusterNetwork: + description: IP address pool to use for pod IPs. + type: array + items: + description: ClusterNetworkEntry is a contiguous block of IP addresses from which pod IPs are allocated. 
+ type: object + properties: + cidr: + description: The complete block for pod IPs. + type: string + hostPrefix: + description: The size (prefix) of block to allocate to each node. If this field is not used by the plugin, it can be left unset. + type: integer + format: int32 + minimum: 0 + clusterNetworkMTU: + description: ClusterNetworkMTU is the MTU for inter-pod networking. + type: integer + conditions: + description: 'conditions represents the observations of a network.config current state. Known .status.conditions.type are: "NetworkTypeMigrationInProgress", "NetworkTypeMigrationMTUReady", "NetworkTypeMigrationTargetCNIAvailable", "NetworkTypeMigrationTargetCNIInUse" and "NetworkTypeMigrationOriginalCNIPurged"' + type: array + items: + description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + type: object + required: + - lastTransitionTime + - message + - reason + - status + - type + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + type: string + format: date-time + message: + description: message is a human readable message indicating details about the transition. This may be an empty string. + type: string + maxLength: 32768 + observedGeneration: + description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + type: integer + format: int64 + minimum: 0 + reason: + description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + type: string + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + status: + description: status of the condition, one of True, False, Unknown. + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + type: string + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + migration: + description: Migration contains the cluster network migration configuration. + type: object + properties: + mtu: + description: MTU contains the MTU migration configuration. + type: object + properties: + machine: + description: Machine contains MTU migration configuration for the machine's uplink. + type: object + properties: + from: + description: From is the MTU to migrate from. + type: integer + format: int32 + minimum: 0 + to: + description: To is the MTU to migrate to. + type: integer + format: int32 + minimum: 0 + network: + description: Network contains MTU migration configuration for the default network. + type: object + properties: + from: + description: From is the MTU to migrate from. + type: integer + format: int32 + minimum: 0 + to: + description: To is the MTU to migrate to. + type: integer + format: int32 + minimum: 0 + networkType: + description: 'NetworkType is the target plugin that is to be deployed. Currently supported values are: OpenShiftSDN, OVNKubernetes' + type: string + enum: + - OpenShiftSDN + - OVNKubernetes + networkType: + description: NetworkType is the plugin that is deployed (e.g. OpenShiftSDN). + type: string + serviceNetwork: + description: IP address pool for services. Currently, we only support a single entry here. + type: array + items: + type: string + served: true + storage: true diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_build.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_openshift-controller-manager-operator_01_build.crd.yaml similarity index 99% rename from vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_build.crd.yaml rename to vendor/github.com/openshift/api/config/v1/0000_10_openshift-controller-manager-operator_01_build.crd.yaml index 75166deb7c..9e80775ffe 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_build.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_10_openshift-controller-manager-operator_01_build.crd.yaml @@ -3,6 +3,7 @@ kind: CustomResourceDefinition metadata: annotations: api-approved.openshift.io: https://github.com/openshift/api/pull/470 + capability.openshift.io/name: Build include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" diff --git a/vendor/github.com/openshift/api/config/v1/custom.authentication.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/custom.authentication.testsuite.yaml new file mode 100644 index 0000000000..1dccb080ac --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/custom.authentication.testsuite.yaml @@ -0,0 +1,273 @@ +apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: "[CustomNoUpgrade] Authentication" +crd: 0000_10_config-operator_01_authentication.crd-CustomNoUpgrade.yaml +tests: + onCreate: + - name: Should be able to create a minimal Authentication + initial: | + apiVersion: config.openshift.io/v1 + kind: Authentication + spec: {} # No spec is required for a Authentication + expected: | + apiVersion: config.openshift.io/v1 + kind: Authentication + spec: {} + - name: Cannot set 
username claim prefix with policy NoPrefix + initial: | + apiVersion: config.openshift.io/v1 + kind: Authentication + spec: + type: OIDC + oidcProviders: + - name: myoidc + issuer: + issuerURL: https://meh.tld + audiences: ['openshift-aud'] + claimMappings: + username: + claim: "preferred_username" + prefixPolicy: NoPrefix + prefix: + prefixString: "myoidc:" + expectedError: "prefix must be set if prefixPolicy is 'Prefix', but must remain unset otherwise" + - name: Can set username claim prefix with policy Prefix + initial: | + apiVersion: config.openshift.io/v1 + kind: Authentication + spec: + type: OIDC + oidcProviders: + - name: myoidc + issuer: + issuerURL: https://meh.tld + audiences: ['openshift-aud'] + claimMappings: + username: + claim: "preferred_username" + prefixPolicy: Prefix + prefix: + prefixString: "myoidc:" + expected: | + apiVersion: config.openshift.io/v1 + kind: Authentication + spec: + type: OIDC + oidcProviders: + - name: myoidc + issuer: + issuerURL: https://meh.tld + audiences: ['openshift-aud'] + claimMappings: + username: + claim: "preferred_username" + prefixPolicy: Prefix + prefix: + prefixString: "myoidc:" + - name: Cannot leave username claim prefix blank with policy Prefix + initial: | + apiVersion: config.openshift.io/v1 + kind: Authentication + spec: + type: OIDC + oidcProviders: + - name: myoidc + issuer: + issuerURL: https://meh.tld + audiences: ['openshift-aud'] + claimMappings: + username: + claim: "preferred_username" + prefixPolicy: Prefix + expectedError: "prefix must be set if prefixPolicy is 'Prefix', but must remain unset otherwise" + - name: Can set OIDC providers with no username prefixing + initial: | + apiVersion: config.openshift.io/v1 + kind: Authentication + spec: + type: OIDC + oidcProviders: + - name: myoidc + issuer: + issuerURL: https://meh.tld + audiences: ['openshift-aud'] + claimMappings: + username: + claim: "preferred_username" + prefixPolicy: NoPrefix + expected: | + apiVersion: config.openshift.io/v1 + kind: Authentication + spec: + type: OIDC + oidcProviders: + - name: myoidc + issuer: + issuerURL: https://meh.tld + audiences: ['openshift-aud'] + claimMappings: + username: + claim: "preferred_username" + prefixPolicy: NoPrefix + onUpdate: + - name: Updating OIDC provider with a client that's not in the status + initial: &initConfig | + apiVersion: config.openshift.io/v1 + kind: Authentication + spec: + type: OIDC + oidcProviders: + - name: myoidc + issuer: + issuerURL: https://meh.tld + audiences: ['openshift-aud'] + oidcClients: + - componentNamespace: namespace + componentName: preexisting + clientID: someclient + - componentNamespace: namespace + componentName: name + clientID: legitclient + status: + oidcClients: + - componentNamespace: namespace + componentName: name + - componentNamespace: namespace2 + componentName: name2 + - componentNamespace: namespace2 + componentName: name3 + updated: | + apiVersion: config.openshift.io/v1 + kind: Authentication + spec: + type: OIDC + oidcProviders: + - name: myoidc + issuer: + issuerURL: https://meh.tld + audiences: ['openshift-aud'] + oidcClients: + - componentNamespace: namespace + componentName: preexisting + clientID: someclient + - componentNamespace: namespace + componentName: name + clientID: legitclient + - componentNamespace: dif-namespace # new client here + componentName: tehName + clientID: cool-client + status: + oidcClients: + - componentNamespace: namespace + componentName: name + - componentNamespace: namespace2 + componentName: name2 + - componentNamespace: 
namespace2 + componentName: name3 + expectedError: "all oidcClients in the oidcProviders must match their componentName and componentNamespace to either a previously configured oidcClient or they must exist in the status.oidcClients" + - name: Updating OIDC provider with a client that's different from the previous one + initial: *initConfig + updated: | + apiVersion: config.openshift.io/v1 + kind: Authentication + spec: + type: OIDC + oidcProviders: + - name: myoidc + issuer: + issuerURL: https://meh.tld + audiences: ['openshift-aud'] + oidcClients: + - componentNamespace: dif-namespace + componentName: tehName + clientID: cool-client + status: + oidcClients: + - componentNamespace: namespace + componentName: name + - componentNamespace: namespace2 + componentName: name2 + - componentNamespace: namespace2 + componentName: name3 + expectedError: "all oidcClients in the oidcProviders must match their componentName and componentNamespace to either a previously configured oidcClient or they must exist in the status.oidcClients" + - name: Updating previously existing client + initial: *initConfig + updated: &prevExistingUpdated | + apiVersion: config.openshift.io/v1 + kind: Authentication + spec: + type: OIDC + oidcProviders: + - name: myoidc + issuer: + issuerURL: https://meh.tld + audiences: ['openshift-aud'] + oidcClients: + - componentNamespace: namespace + componentName: preexisting + clientID: different-client + status: + oidcClients: + - componentNamespace: namespace + componentName: name + - componentNamespace: namespace2 + componentName: name2 + - componentNamespace: namespace2 + componentName: name3 + expected: *prevExistingUpdated + - name: Removing a configured client from the status (== component unregister) + initial: *initConfig + updated: &removeFromStatus | + apiVersion: config.openshift.io/v1 + kind: Authentication + spec: + type: OIDC + oidcProviders: + - name: myoidc + issuer: + issuerURL: https://meh.tld + audiences: ['openshift-aud'] + oidcClients: + - componentNamespace: namespace + componentName: preexisting + clientID: different-client + - componentNamespace: namespace + componentName: name + clientID: legitclient + status: + oidcClients: + - componentNamespace: namespace2 + componentName: name2 + - componentNamespace: namespace2 + componentName: name3 + expected: *removeFromStatus + - name: Simply add a valid client + initial: *initConfig + updated: &addClient | + apiVersion: config.openshift.io/v1 + kind: Authentication + spec: + type: OIDC + oidcProviders: + - name: myoidc + issuer: + issuerURL: https://meh.tld + audiences: ['openshift-aud'] + oidcClients: + - componentNamespace: namespace + componentName: preexisting + clientID: different-client + - componentNamespace: namespace + componentName: name + clientID: legitclient + - componentNamespace: namespace2 + componentName: name3 + clientID: justavalidclient + status: + oidcClients: + - componentNamespace: namespace + componentName: name + - componentNamespace: namespace2 + componentName: name2 + - componentNamespace: namespace2 + componentName: name3 + expected: *addClient diff --git a/vendor/github.com/openshift/api/config/v1/custom.clusterversion.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/custom.clusterversion.testsuite.yaml new file mode 100644 index 0000000000..f3090558b9 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/custom.clusterversion.testsuite.yaml @@ -0,0 +1,472 @@ +apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: 
"[CustomNoUpgrade] ClusterVersion" +crd: 0000_00_cluster-version-operator_01_clusterversion-CustomNoUpgrade.crd.yaml +tests: + onCreate: + - name: Should be able to create a minimal ClusterVersion + initial: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + expected: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + - name: Should allow image to be set + initial: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + desiredUpdate: + image: bar + expected: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + desiredUpdate: + image: bar + - name: Should allow version to be set + initial: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + desiredUpdate: + version: 4.11.1 + expected: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + desiredUpdate: + version: 4.11.1 + - name: Should allow architecture to be empty + initial: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + desiredUpdate: + architecture: "" + version: 4.11.1 + expected: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + desiredUpdate: + architecture: "" + version: 4.11.1 + - name: Should allow architecture and version to be set + initial: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + desiredUpdate: + architecture: Multi + version: 4.11.1 + expected: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + desiredUpdate: + architecture: Multi + version: 4.11.1 + - name: Version must be set if architecture is set + initial: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + desiredUpdate: + architecture: Multi + expectedError: "Version must be set if Architecture is set" + - name: Should not allow image and architecture to be set + initial: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + desiredUpdate: + architecture: Multi + version: 4.11.1 + image: bar + expectedError: "cannot set both Architecture and Image" + - name: Should be able to create a ClusterVersion with base capability None, and additional capabilities baremetal and MachineAPI + initial: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + capabilities: + baselineCapabilitySet: None + additionalEnabledCapabilities: + - baremetal + - MachineAPI + expected: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + capabilities: + baselineCapabilitySet: None + additionalEnabledCapabilities: + - baremetal + - MachineAPI + - name: Should not be able to create a ClusterVersion with base capability None, and additional capabilities baremetal without MachineAPI + initial: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + capabilities: + baselineCapabilitySet: None + additionalEnabledCapabilities: + - baremetal + expectedError: the `baremetal` capability requires the `MachineAPI` capability, which is neither explicitly or implicitly enabled in this cluster, please enable the `MachineAPI` capability + - name: Should be able to create a ClusterVersion with base capability None, and additional capabilities marketplace and OperatorLifecycleManager + initial: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: 
+ clusterID: foo + capabilities: + baselineCapabilitySet: None + additionalEnabledCapabilities: + - marketplace + - OperatorLifecycleManager + expected: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + capabilities: + baselineCapabilitySet: None + additionalEnabledCapabilities: + - marketplace + - OperatorLifecycleManager + - name: Should not be able to create a ClusterVersion with base capability None, and additional capabilities marketplace without OperatorLifecycleManager + initial: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + capabilities: + baselineCapabilitySet: None + additionalEnabledCapabilities: + - marketplace + expectedError: the `marketplace` capability requires the `OperatorLifecycleManager` capability, which is neither explicitly or implicitly enabled in this cluster, please enable the `OperatorLifecycleManager` capability + - name: Should be able to set a custom signature store + initial: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + signatureStores: + - url: "https://osus.ocp.com" + expected: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + signatureStores: + - url: "https://osus.ocp.com" + - name: Should be able to set multiple custom signature store + initial: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + signatureStores: + - url: "https://osus1.ocp.com" + - url: "https://osus2.ocp.com" + expected: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + signatureStores: + - url: "https://osus1.ocp.com" + - url: "https://osus2.ocp.com" + - name: Invalid custom signature store should throw error + initial: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + signatureStores: + - url: "osus1.ocp.com" + expectedError: "url must be a valid absolute URL" + - name: Should be able to unset the signature stores + initial: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + signatureStores: [] + expected: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + signatureStores: [] + onUpdate: + - name: Should not allow image to be set if architecture set + initial: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + desiredUpdate: + architecture: Multi + version: 4.11.1 + updated: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + desiredUpdate: + architecture: Multi + version: 4.11.1 + image: bar + expectedError: "cannot set both Architecture and Image" + - name: Should not allow architecture to be set if image set + initial: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + desiredUpdate: + image: bar + updated: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + desiredUpdate: + architecture: Multi + version: 4.11.1 + image: bar + expectedError: "cannot set both Architecture and Image" + - name: Should be able to add the baremetal capability with a ClusterVersion with base capability None, and implicitly enabled MachineAPI + initial: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + capabilities: + baselineCapabilitySet: None + status: + desired: + version: foo + image: foo + observedGeneration: 1 + versionHash: foo + availableUpdates: + - version: 
foo + image: foo + capabilities: + enabledCapabilities: + - MachineAPI + updated: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + capabilities: + baselineCapabilitySet: None + additionalEnabledCapabilities: + - baremetal + status: + desired: + version: foo + image: foo + observedGeneration: 1 + versionHash: foo + availableUpdates: + - version: foo + image: foo + capabilities: + enabledCapabilities: + - MachineAPI + expected: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + capabilities: + baselineCapabilitySet: None + additionalEnabledCapabilities: + - baremetal + status: + desired: + version: foo + image: foo + observedGeneration: 1 + versionHash: foo + availableUpdates: + - version: foo + image: foo + capabilities: + enabledCapabilities: + - MachineAPI + - name: Should be able to add the baremetal capability with a ClusterVersion with base capability None, with the Machine API capability + initial: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + capabilities: + baselineCapabilitySet: None + updated: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + capabilities: + baselineCapabilitySet: None + additionalEnabledCapabilities: + - baremetal + - MachineAPI + expected: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + capabilities: + baselineCapabilitySet: None + additionalEnabledCapabilities: + - baremetal + - MachineAPI + - name: Should not be able to add the baremetal capability with a ClusterVersion with base capability None, and without MachineAPI + initial: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + capabilities: + baselineCapabilitySet: None + updated: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + capabilities: + baselineCapabilitySet: None + additionalEnabledCapabilities: + - baremetal + expectedError: the `baremetal` capability requires the `MachineAPI` capability, which is neither explicitly or implicitly enabled in this cluster, please enable the `MachineAPI` capability + - name: Should be able to add the marketplace capability with a ClusterVersion with base capability None, and implicitly enabled OperatorLifecycleManager + initial: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + capabilities: + baselineCapabilitySet: None + status: + desired: + version: foo + image: foo + observedGeneration: 1 + versionHash: foo + availableUpdates: + - version: foo + image: foo + capabilities: + enabledCapabilities: + - OperatorLifecycleManager + updated: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + capabilities: + baselineCapabilitySet: None + additionalEnabledCapabilities: + - marketplace + status: + desired: + version: foo + image: foo + observedGeneration: 1 + versionHash: foo + availableUpdates: + - version: foo + image: foo + capabilities: + enabledCapabilities: + - OperatorLifecycleManager + expected: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + capabilities: + baselineCapabilitySet: None + additionalEnabledCapabilities: + - marketplace + status: + desired: + version: foo + image: foo + observedGeneration: 1 + versionHash: foo + availableUpdates: + - version: foo + image: foo + capabilities: + enabledCapabilities: + - OperatorLifecycleManager + - name: Should be able to add the 
marketplace capability with a ClusterVersion with base capability None, with the OperatorLifecycleManager capability + initial: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + capabilities: + baselineCapabilitySet: None + updated: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + capabilities: + baselineCapabilitySet: None + additionalEnabledCapabilities: + - marketplace + - OperatorLifecycleManager + expected: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + capabilities: + baselineCapabilitySet: None + additionalEnabledCapabilities: + - marketplace + - OperatorLifecycleManager + - name: Should not be able to add the marketplace capability with a ClusterVersion with base capability None, and without OperatorLifecycleManager + initial: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + capabilities: + baselineCapabilitySet: None + updated: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + capabilities: + baselineCapabilitySet: None + additionalEnabledCapabilities: + - marketplace + expectedError: the `marketplace` capability requires the `OperatorLifecycleManager` capability, which is neither explicitly or implicitly enabled in this cluster, please enable the `OperatorLifecycleManager` capability diff --git a/vendor/github.com/openshift/api/config/v1/custom.network.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/custom.network.testsuite.yaml new file mode 100644 index 0000000000..59e9fbdfff --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/custom.network.testsuite.yaml @@ -0,0 +1,28 @@ +apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: "[CustomNoUpgrade] Network" +crd: 0000_10_config-operator_01_network-CustomNoUpgrade.crd.yaml +tests: + onCreate: + - name: Should be able to set status conditions + initial: | + apiVersion: config.openshift.io/v1 + kind: Network + spec: {} # No spec is required for a Network + status: + conditions: + - type: NetworkTypeMigrationInProgress + status: "False" + reason: "Reason" + message: "Message" + lastTransitionTime: "2023-10-25T12:00:00Z" + expected: | + apiVersion: config.openshift.io/v1 + kind: Network + spec: {} + status: + conditions: + - type: NetworkTypeMigrationInProgress + status: "False" + reason: "Reason" + message: "Message" + lastTransitionTime: "2023-10-25T12:00:00Z" diff --git a/vendor/github.com/openshift/api/config/v1/feature_gates.go b/vendor/github.com/openshift/api/config/v1/feature_gates.go index 158487b5a8..dc53db89d2 100644 --- a/vendor/github.com/openshift/api/config/v1/feature_gates.go +++ b/vendor/github.com/openshift/api/config/v1/feature_gates.go @@ -53,16 +53,6 @@ var ( OwningProduct: ocpSpecific, } - FeatureGateRetroactiveDefaultStorageClass = FeatureGateName("RetroactiveDefaultStorageClass") - retroactiveDefaultStorageClass = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateRetroactiveDefaultStorageClass, - }, - OwningJiraComponent: "storage", - ResponsiblePerson: "RomanBednar", - OwningProduct: kubernetes, - } - FeatureGateExternalCloudProvider = FeatureGateName("ExternalCloudProvider") externalCloudProvider = FeatureGateDescription{ FeatureGateAttributes: FeatureGateAttributes{ @@ -272,6 +262,16 @@ var ( OwningProduct: ocpSpecific, } + FeatureGateNetworkLiveMigration = FeatureGateName("NetworkLiveMigration") + 
sdnLiveMigration = FeatureGateDescription{ + FeatureGateAttributes: FeatureGateAttributes{ + Name: FeatureGateNetworkLiveMigration, + }, + OwningJiraComponent: "Networking/ovn-kubernetes", + ResponsiblePerson: "pliu", + OwningProduct: ocpSpecific, + } + FeatureGateAutomatedEtcdBackup = FeatureGateName("AutomatedEtcdBackup") automatedEtcdBackup = FeatureGateDescription{ FeatureGateAttributes: FeatureGateAttributes{ @@ -291,4 +291,125 @@ var ( ResponsiblePerson: "msluiter", OwningProduct: ocpSpecific, } + + FeatureGateDNSNameResolver = FeatureGateName("DNSNameResolver") + dnsNameResolver = FeatureGateDescription{ + FeatureGateAttributes: FeatureGateAttributes{ + Name: FeatureGateDNSNameResolver, + }, + OwningJiraComponent: "dns", + ResponsiblePerson: "miciah", + OwningProduct: ocpSpecific, + } + + FeatureGateVSphereControlPlaneMachineset = FeatureGateName("VSphereControlPlaneMachineSet") + vSphereControlPlaneMachineset = FeatureGateDescription{ + FeatureGateAttributes: FeatureGateAttributes{ + Name: FeatureGateVSphereControlPlaneMachineset, + }, + OwningJiraComponent: "splat", + ResponsiblePerson: "rvanderp3", + OwningProduct: ocpSpecific, + } + + FeatureGateMachineConfigNodes = FeatureGateName("MachineConfigNodes") + machineConfigNodes = FeatureGateDescription{ + FeatureGateAttributes: FeatureGateAttributes{ + Name: FeatureGateMachineConfigNodes, + }, + OwningJiraComponent: "MachineConfigOperator", + ResponsiblePerson: "cdoern", + OwningProduct: ocpSpecific, + } + + FeatureGateClusterAPIInstall = FeatureGateName("ClusterAPIInstall") + clusterAPIInstall = FeatureGateDescription{ + FeatureGateAttributes: FeatureGateAttributes{ + Name: FeatureGateClusterAPIInstall, + }, + OwningJiraComponent: "Installer", + ResponsiblePerson: "vincepri", + OwningProduct: ocpSpecific, + } + + FeatureGateMetricsServer = FeatureGateName("MetricsServer") + metricsServer = FeatureGateDescription{ + FeatureGateAttributes: FeatureGateAttributes{ + Name: FeatureGateMetricsServer, + }, + OwningJiraComponent: "Monitoring", + ResponsiblePerson: "slashpai", + OwningProduct: ocpSpecific, + } + + FeatureGateInstallAlternateInfrastructureAWS = FeatureGateName("InstallAlternateInfrastructureAWS") + installAlternateInfrastructureAWS = FeatureGateDescription{ + FeatureGateAttributes: FeatureGateAttributes{ + Name: FeatureGateInstallAlternateInfrastructureAWS, + }, + OwningJiraComponent: "Installer", + ResponsiblePerson: "padillon", + OwningProduct: ocpSpecific, + } + + FeatureGateGCPClusterHostedDNS = FeatureGateName("GCPClusterHostedDNS") + gcpClusterHostedDNS = FeatureGateDescription{ + FeatureGateAttributes: FeatureGateAttributes{ + Name: FeatureGateGCPClusterHostedDNS, + }, + OwningJiraComponent: "Installer", + ResponsiblePerson: "barbacbd", + OwningProduct: ocpSpecific, + } + + FeatureGateMixedCPUsAllocation = FeatureGateName("MixedCPUsAllocation") + mixedCPUsAllocation = FeatureGateDescription{ + FeatureGateAttributes: FeatureGateAttributes{ + Name: FeatureGateMixedCPUsAllocation, + }, + OwningJiraComponent: "NodeTuningOperator", + ResponsiblePerson: "titzhak", + OwningProduct: ocpSpecific, + } + + FeatureGateManagedBootImages = FeatureGateName("ManagedBootImages") + managedBootImages = FeatureGateDescription{ + FeatureGateAttributes: FeatureGateAttributes{ + Name: FeatureGateManagedBootImages, + }, + OwningJiraComponent: "MachineConfigOperator", + ResponsiblePerson: "djoshy", + OwningProduct: ocpSpecific, + } + + FeatureGateDisableKubeletCloudCredentialProviders = FeatureGateName("DisableKubeletCloudCredentialProviders") 
+ disableKubeletCloudCredentialProviders = FeatureGateDescription{ + FeatureGateAttributes: FeatureGateAttributes{ + Name: FeatureGateDisableKubeletCloudCredentialProviders, + }, + OwningJiraComponent: "cloud-provider", + ResponsiblePerson: "jspeed", + OwningProduct: kubernetes, + } + + + FeatureGateOnClusterBuild = FeatureGateName("OnClusterBuild") + onClusterBuild = FeatureGateDescription{ + FeatureGateAttributes: FeatureGateAttributes{ + Name: FeatureGateOnClusterBuild, + }, + OwningJiraComponent: "MachineConfigOperator", + ResponsiblePerson: "dkhater", + OwningProduct: ocpSpecific, + } + + FeatureGateSignatureStores = FeatureGateName("SignatureStores") + signatureStores = FeatureGateDescription{ + FeatureGateAttributes: FeatureGateAttributes{ + Name: FeatureGateSignatureStores, + }, + OwningJiraComponent: "over-the-air-updates", + ResponsiblePerson: "lmohanty", + OwningProduct: ocpSpecific, + } ) diff --git a/vendor/github.com/openshift/api/config/v1/stable.build.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/stable.build.testsuite.yaml index cdd8a9b701..b422ebd206 100644 --- a/vendor/github.com/openshift/api/config/v1/stable.build.testsuite.yaml +++ b/vendor/github.com/openshift/api/config/v1/stable.build.testsuite.yaml @@ -1,6 +1,6 @@ apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this name: "[Stable] Build" -crd: 0000_10_config-operator_01_build.crd.yaml +crd: 0000_10_openshift-controller-manager-operator_01_build.crd.yaml tests: onCreate: - name: Should be able to create a minimal Build diff --git a/vendor/github.com/openshift/api/config/v1/stable.clusterversion.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/stable.clusterversion.testsuite.yaml index 50bb3e0274..4c3fed149d 100644 --- a/vendor/github.com/openshift/api/config/v1/stable.clusterversion.testsuite.yaml +++ b/vendor/github.com/openshift/api/config/v1/stable.clusterversion.testsuite.yaml @@ -1,6 +1,6 @@ apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this name: "[Stable] ClusterVersion" -crd: 0000_00_cluster-version-operator_01_clusterversion.crd.yaml +crd: 0000_00_cluster-version-operator_01_clusterversion-Default.crd.yaml tests: onCreate: - name: Should be able to create a minimal ClusterVersion diff --git a/vendor/github.com/openshift/api/config/v1/stable.infrastructure.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/stable.infrastructure.testsuite.yaml index 99b11b0894..025cae85a8 100644 --- a/vendor/github.com/openshift/api/config/v1/stable.infrastructure.testsuite.yaml +++ b/vendor/github.com/openshift/api/config/v1/stable.infrastructure.testsuite.yaml @@ -993,9 +993,9 @@ tests: type: IBMCloud ibmcloud: serviceEndpoints: - - name: DummyVPC + - name: VPC url: https://dummy.vpc.com - - name: DummyCOS + - name: COS url: https://dummy.cos.com expected: | apiVersion: config.openshift.io/v1 @@ -1010,9 +1010,9 @@ tests: type: IBMCloud ibmcloud: serviceEndpoints: - - name: DummyVPC + - name: VPC url: https://dummy.vpc.com - - name: DummyCOS + - name: COS url: https://dummy.cos.com - name: Should not be able to add empty (URL) ServiceEndpoints to IBMCloud PlatformStatus initial: | @@ -1035,7 +1035,7 @@ tests: type: IBMCloud ibmcloud: serviceEndpoints: - - name: EmptyCOS + - name: COS url: " " expectedStatusError: " status.platformStatus.ibmcloud.serviceEndpoints[0].url: Invalid value: \"string\": url must be a valid absolute URL" - name: Should not be able to add invalid (URL) ServiceEndpoints to 
IBMCloud PlatformStatus @@ -1059,8 +1059,34 @@ tests: type: IBMCloud ibmcloud: serviceEndpoints: - - name: DummyVPC + - name: VPC url: https://dummy.vpc.com - - name: BadCOS + - name: COS url: dummy-cos-com expectedStatusError: " status.platformStatus.ibmcloud.serviceEndpoints[1].url: Invalid value: \"string\": url must be a valid absolute URL" + - name: Should not be able to add invalid (Name) ServiceEndpoints to IBMCloud PlatformStatus + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: IBMCloud + platformStatus: + type: IBMCloud + ibmcloud: + serviceEndpoints: [] + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: IBMCloud + platformStatus: + type: IBMCloud + ibmcloud: + serviceEndpoints: + - name: VPC + url: https://dummy.vpc.com + - name: BadService + url: https://bad-service.com + expectedStatusError: " status.platformStatus.ibmcloud.serviceEndpoints[1].name: Unsupported value: \"BadService\": supported values: \"CIS\", \"COS\", \"DNSServices\", \"GlobalSearch\", \"GlobalTagging\", \"HyperProtect\", \"IAM\", \"KeyProtect\", \"ResourceController\", \"ResourceManager\", \"VPC\"" diff --git a/vendor/github.com/openshift/api/config/v1/stable.network.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/stable.network.testsuite.yaml index e8a8bcfaf2..7922d44812 100644 --- a/vendor/github.com/openshift/api/config/v1/stable.network.testsuite.yaml +++ b/vendor/github.com/openshift/api/config/v1/stable.network.testsuite.yaml @@ -1,6 +1,6 @@ apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this name: "[Stable] Network" -crd: 0000_10_config-operator_01_network.crd.yaml +crd: 0000_10_config-operator_01_network-Default.crd.yaml tests: onCreate: - name: Should be able to create a minimal Network @@ -12,3 +12,20 @@ tests: apiVersion: config.openshift.io/v1 kind: Network spec: {} + - name: Should not be able to set status conditions + initial: | + apiVersion: config.openshift.io/v1 + kind: Network + spec: {} # No spec is required for a Network + status: + conditions: + - type: NetworkTypeMigrationInProgress + status: "False" + reason: "Reason" + message: "Message" + lastTransitionTime: "2023-10-25T12:00:00Z" + expected: | + apiVersion: config.openshift.io/v1 + kind: Network + spec: {} + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/techpreview.authentication.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/techpreview.authentication.testsuite.yaml new file mode 100644 index 0000000000..f904ceafab --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/techpreview.authentication.testsuite.yaml @@ -0,0 +1,287 @@ +apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: "[TechPreviewNoUpgrade] Authentication" +crd: 0000_10_config-operator_01_authentication.crd-TechPreviewNoUpgrade.yaml +tests: + onCreate: + - name: Should be able to create a minimal Authentication + initial: | + apiVersion: config.openshift.io/v1 + kind: Authentication + spec: {} # No spec is required for a Authentication + expected: | + apiVersion: config.openshift.io/v1 + kind: Authentication + spec: {} + - name: Cannot set username claim prefix with policy NoPrefix + initial: | + apiVersion: config.openshift.io/v1 + kind: Authentication + spec: + type: OIDC + oidcProviders: + - name: myoidc + issuer: + issuerURL: https://meh.tld + audiences: ['openshift-aud'] + claimMappings: + username: + claim: 
"preferred_username" + prefixPolicy: NoPrefix + prefix: + prefixString: "myoidc:" + expectedError: "prefix must be set if prefixPolicy is 'Prefix', but must remain unset otherwise" + - name: Can set username claim prefix with policy Prefix + initial: | + apiVersion: config.openshift.io/v1 + kind: Authentication + spec: + type: OIDC + oidcProviders: + - name: myoidc + issuer: + issuerURL: https://meh.tld + audiences: ['openshift-aud'] + claimMappings: + username: + claim: "preferred_username" + prefixPolicy: Prefix + prefix: + prefixString: "myoidc:" + expected: | + apiVersion: config.openshift.io/v1 + kind: Authentication + spec: + type: OIDC + oidcProviders: + - name: myoidc + issuer: + issuerURL: https://meh.tld + audiences: ['openshift-aud'] + claimMappings: + username: + claim: "preferred_username" + prefixPolicy: Prefix + prefix: + prefixString: "myoidc:" + - name: Cannot leave username claim prefix blank with policy Prefix + initial: | + apiVersion: config.openshift.io/v1 + kind: Authentication + spec: + type: OIDC + oidcProviders: + - name: myoidc + issuer: + issuerURL: https://meh.tld + audiences: ['openshift-aud'] + claimMappings: + username: + claim: "preferred_username" + prefixPolicy: Prefix + expectedError: "prefix must be set if prefixPolicy is 'Prefix', but must remain unset otherwise" + - name: Can set OIDC providers with no username prefixing + initial: | + apiVersion: config.openshift.io/v1 + kind: Authentication + spec: + type: OIDC + oidcProviders: + - name: myoidc + issuer: + issuerURL: https://meh.tld + audiences: ['openshift-aud'] + claimMappings: + username: + claim: "preferred_username" + prefixPolicy: NoPrefix + expected: | + apiVersion: config.openshift.io/v1 + kind: Authentication + spec: + type: OIDC + oidcProviders: + - name: myoidc + issuer: + issuerURL: https://meh.tld + audiences: ['openshift-aud'] + claimMappings: + username: + claim: "preferred_username" + prefixPolicy: NoPrefix + onUpdate: + - name: Updating OIDC provider with a client that's not in the status + initial: &initConfig | + apiVersion: config.openshift.io/v1 + kind: Authentication + spec: + type: OIDC + oidcProviders: + - name: myoidc + issuer: + issuerURL: https://meh.tld + audiences: ['openshift-aud'] + oidcClients: + - componentNamespace: namespace + componentName: preexisting + clientID: someclient + - componentNamespace: namespace + componentName: name + clientID: legitclient + status: + oidcClients: + - componentNamespace: namespace + componentName: name + - componentNamespace: namespace2 + componentName: name2 + - componentNamespace: namespace2 + componentName: name3 + updated: | + apiVersion: config.openshift.io/v1 + kind: Authentication + spec: + type: OIDC + oidcProviders: + - name: myoidc + issuer: + issuerURL: https://meh.tld + audiences: ['openshift-aud'] + oidcClients: + - componentNamespace: namespace + componentName: preexisting + clientID: someclient + - componentNamespace: namespace + componentName: name + clientID: legitclient + - componentNamespace: dif-namespace # new client here + componentName: tehName + clientID: cool-client + status: + oidcClients: + - componentNamespace: namespace + componentName: name + - componentNamespace: namespace2 + componentName: name2 + - componentNamespace: namespace2 + componentName: name3 + expectedError: "all oidcClients in the oidcProviders must match their componentName and componentNamespace to either a previously configured oidcClient or they must exist in the status.oidcClients" + - name: Updating OIDC provider with a client that's 
different from the previous one + initial: *initConfig + updated: | + apiVersion: config.openshift.io/v1 + kind: Authentication + spec: + type: OIDC + oidcProviders: + - name: myoidc + issuer: + issuerURL: https://meh.tld + audiences: ['openshift-aud'] + oidcClients: + - componentNamespace: dif-namespace + componentName: tehName + clientID: cool-client + status: + oidcClients: + - componentNamespace: namespace + componentName: name + - componentNamespace: namespace2 + componentName: name2 + - componentNamespace: namespace2 + componentName: name3 + expectedError: "all oidcClients in the oidcProviders must match their componentName and componentNamespace to either a previously configured oidcClient or they must exist in the status.oidcClients" + - name: Updating previously existing client + initial: *initConfig + updated: &prevExistingUpdated | + apiVersion: config.openshift.io/v1 + kind: Authentication + spec: + type: OIDC + oidcProviders: + - name: myoidc + issuer: + issuerURL: https://meh.tld + audiences: ['openshift-aud'] + oidcClients: + - componentNamespace: namespace + componentName: preexisting + clientID: different-client + status: + oidcClients: + - componentNamespace: namespace + componentName: name + - componentNamespace: namespace2 + componentName: name2 + - componentNamespace: namespace2 + componentName: name3 + expected: *prevExistingUpdated + - name: Removing a configured client from the status (== component unregister) + initial: *initConfig + updated: &removeFromStatus | + apiVersion: config.openshift.io/v1 + kind: Authentication + spec: + type: OIDC + oidcProviders: + - name: myoidc + issuer: + issuerURL: https://meh.tld + audiences: ['openshift-aud'] + oidcClients: + - componentNamespace: namespace + componentName: preexisting + clientID: different-client + - componentNamespace: namespace + componentName: name + clientID: legitclient + status: + oidcClients: + - componentNamespace: namespace2 + componentName: name2 + - componentNamespace: namespace2 + componentName: name3 + expected: *removeFromStatus + - name: Simply add a valid client + initial: *initConfig + updated: &addClient | + apiVersion: config.openshift.io/v1 + kind: Authentication + spec: + type: OIDC + oidcProviders: + - name: myoidc + issuer: + issuerURL: https://meh.tld + audiences: ['openshift-aud'] + oidcClients: + - componentNamespace: namespace + componentName: preexisting + clientID: different-client + - componentNamespace: namespace + componentName: name + clientID: legitclient + - componentNamespace: namespace2 + componentName: name3 + clientID: justavalidclient + status: + oidcClients: + - componentNamespace: namespace + componentName: name + - componentNamespace: namespace2 + componentName: name2 + - componentNamespace: namespace2 + componentName: name3 + expected: *addClient + - name: Remove all oidcProviders + initial: *initConfig + updated: &removeFromStatus | + apiVersion: config.openshift.io/v1 + kind: Authentication + spec: + type: OIDC + status: + oidcClients: + - componentNamespace: namespace2 + componentName: name2 + - componentNamespace: namespace2 + componentName: name3 + expected: *removeFromStatus \ No newline at end of file diff --git a/vendor/github.com/openshift/api/config/v1/techpreview.clusterversion.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/techpreview.clusterversion.testsuite.yaml new file mode 100644 index 0000000000..71988108e5 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/techpreview.clusterversion.testsuite.yaml @@ -0,0 +1,472 @@ +apiVersion: 
apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: "[TechPreviewNoUpgrade] ClusterVersion" +crd: 0000_00_cluster-version-operator_01_clusterversion-TechPreviewNoUpgrade.crd.yaml +tests: + onCreate: + - name: Should be able to create a minimal ClusterVersion + initial: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + expected: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + - name: Should allow image to be set + initial: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + desiredUpdate: + image: bar + expected: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + desiredUpdate: + image: bar + - name: Should allow version to be set + initial: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + desiredUpdate: + version: 4.11.1 + expected: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + desiredUpdate: + version: 4.11.1 + - name: Should allow architecture to be empty + initial: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + desiredUpdate: + architecture: "" + version: 4.11.1 + expected: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + desiredUpdate: + architecture: "" + version: 4.11.1 + - name: Should allow architecture and version to be set + initial: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + desiredUpdate: + architecture: Multi + version: 4.11.1 + expected: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + desiredUpdate: + architecture: Multi + version: 4.11.1 + - name: Version must be set if architecture is set + initial: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + desiredUpdate: + architecture: Multi + expectedError: "Version must be set if Architecture is set" + - name: Should not allow image and architecture to be set + initial: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + desiredUpdate: + architecture: Multi + version: 4.11.1 + image: bar + expectedError: "cannot set both Architecture and Image" + - name: Should be able to create a ClusterVersion with base capability None, and additional capabilities baremetal and MachineAPI + initial: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + capabilities: + baselineCapabilitySet: None + additionalEnabledCapabilities: + - baremetal + - MachineAPI + expected: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + capabilities: + baselineCapabilitySet: None + additionalEnabledCapabilities: + - baremetal + - MachineAPI + - name: Should not be able to create a ClusterVersion with base capability None, and additional capabilities baremetal without MachineAPI + initial: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + capabilities: + baselineCapabilitySet: None + additionalEnabledCapabilities: + - baremetal + expectedError: the `baremetal` capability requires the `MachineAPI` capability, which is neither explicitly or implicitly enabled in this cluster, please enable the `MachineAPI` capability + - name: Should be able to create a ClusterVersion with base capability None, and additional capabilities marketplace and 
OperatorLifecycleManager + initial: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + capabilities: + baselineCapabilitySet: None + additionalEnabledCapabilities: + - marketplace + - OperatorLifecycleManager + expected: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + capabilities: + baselineCapabilitySet: None + additionalEnabledCapabilities: + - marketplace + - OperatorLifecycleManager + - name: Should not be able to create a ClusterVersion with base capability None, and additional capabilities marketplace without OperatorLifecycleManager + initial: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + capabilities: + baselineCapabilitySet: None + additionalEnabledCapabilities: + - marketplace + expectedError: the `marketplace` capability requires the `OperatorLifecycleManager` capability, which is neither explicitly or implicitly enabled in this cluster, please enable the `OperatorLifecycleManager` capability + - name: Should be able to set a custom signature store + initial: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + signatureStores: + - url: "https://osus.ocp.com" + expected: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + signatureStores: + - url: "https://osus.ocp.com" + - name: Should be able to set multiple custom signature store + initial: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + signatureStores: + - url: "https://osus1.ocp.com" + - url: "https://osus2.ocp.com" + expected: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + signatureStores: + - url: "https://osus1.ocp.com" + - url: "https://osus2.ocp.com" + - name: Invalid custom signature store should throw error + initial: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + signatureStores: + - url: "osus1.ocp.com" + expectedError: "url must be a valid absolute URL" + - name: Should be able to unset the signature stores + initial: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + signatureStores: [] + expected: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + signatureStores: [] + onUpdate: + - name: Should not allow image to be set if architecture set + initial: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + desiredUpdate: + architecture: Multi + version: 4.11.1 + updated: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + desiredUpdate: + architecture: Multi + version: 4.11.1 + image: bar + expectedError: "cannot set both Architecture and Image" + - name: Should not allow architecture to be set if image set + initial: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + desiredUpdate: + image: bar + updated: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + desiredUpdate: + architecture: Multi + version: 4.11.1 + image: bar + expectedError: "cannot set both Architecture and Image" + - name: Should be able to add the baremetal capability with a ClusterVersion with base capability None, and implicitly enabled MachineAPI + initial: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + capabilities: + baselineCapabilitySet: None + status: + desired: 
+ version: foo + image: foo + observedGeneration: 1 + versionHash: foo + availableUpdates: + - version: foo + image: foo + capabilities: + enabledCapabilities: + - MachineAPI + updated: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + capabilities: + baselineCapabilitySet: None + additionalEnabledCapabilities: + - baremetal + status: + desired: + version: foo + image: foo + observedGeneration: 1 + versionHash: foo + availableUpdates: + - version: foo + image: foo + capabilities: + enabledCapabilities: + - MachineAPI + expected: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + capabilities: + baselineCapabilitySet: None + additionalEnabledCapabilities: + - baremetal + status: + desired: + version: foo + image: foo + observedGeneration: 1 + versionHash: foo + availableUpdates: + - version: foo + image: foo + capabilities: + enabledCapabilities: + - MachineAPI + - name: Should be able to add the baremetal capability with a ClusterVersion with base capability None, with the Machine API capability + initial: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + capabilities: + baselineCapabilitySet: None + updated: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + capabilities: + baselineCapabilitySet: None + additionalEnabledCapabilities: + - baremetal + - MachineAPI + expected: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + capabilities: + baselineCapabilitySet: None + additionalEnabledCapabilities: + - baremetal + - MachineAPI + - name: Should not be able to add the baremetal capability with a ClusterVersion with base capability None, and without MachineAPI + initial: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + capabilities: + baselineCapabilitySet: None + updated: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + capabilities: + baselineCapabilitySet: None + additionalEnabledCapabilities: + - baremetal + expectedError: the `baremetal` capability requires the `MachineAPI` capability, which is neither explicitly or implicitly enabled in this cluster, please enable the `MachineAPI` capability + - name: Should be able to add the marketplace capability with a ClusterVersion with base capability None, and implicitly enabled OperatorLifecycleManager + initial: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + capabilities: + baselineCapabilitySet: None + status: + desired: + version: foo + image: foo + observedGeneration: 1 + versionHash: foo + availableUpdates: + - version: foo + image: foo + capabilities: + enabledCapabilities: + - OperatorLifecycleManager + updated: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + capabilities: + baselineCapabilitySet: None + additionalEnabledCapabilities: + - marketplace + status: + desired: + version: foo + image: foo + observedGeneration: 1 + versionHash: foo + availableUpdates: + - version: foo + image: foo + capabilities: + enabledCapabilities: + - OperatorLifecycleManager + expected: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + capabilities: + baselineCapabilitySet: None + additionalEnabledCapabilities: + - marketplace + status: + desired: + version: foo + image: foo + observedGeneration: 1 + versionHash: foo + availableUpdates: + - version: foo + image: foo + 
capabilities: + enabledCapabilities: + - OperatorLifecycleManager + - name: Should be able to add the marketplace capability with a ClusterVersion with base capability None, with the OperatorLifecycleManager capability + initial: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + capabilities: + baselineCapabilitySet: None + updated: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + capabilities: + baselineCapabilitySet: None + additionalEnabledCapabilities: + - marketplace + - OperatorLifecycleManager + expected: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + capabilities: + baselineCapabilitySet: None + additionalEnabledCapabilities: + - marketplace + - OperatorLifecycleManager + - name: Should not be able to add the marketplace capability with a ClusterVersion with base capability None, and without OperatorLifecycleManager + initial: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + capabilities: + baselineCapabilitySet: None + updated: | + apiVersion: config.openshift.io/v1 + kind: ClusterVersion + spec: + clusterID: foo + capabilities: + baselineCapabilitySet: None + additionalEnabledCapabilities: + - marketplace + expectedError: the `marketplace` capability requires the `OperatorLifecycleManager` capability, which is neither explicitly or implicitly enabled in this cluster, please enable the `OperatorLifecycleManager` capability diff --git a/vendor/github.com/openshift/api/config/v1/techpreview.infrastructure.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/techpreview.infrastructure.testsuite.yaml index 7834e1f841..78501fb174 100644 --- a/vendor/github.com/openshift/api/config/v1/techpreview.infrastructure.testsuite.yaml +++ b/vendor/github.com/openshift/api/config/v1/techpreview.infrastructure.testsuite.yaml @@ -517,3 +517,48 @@ tests: resourceTags: - {parentID: "test-project-123", key: "key", value: "value"} expectedStatusError: "status.platformStatus.gcp.resourceTags: Invalid value: \"array\": resourceTags are immutable and may only be configured during installation" + - name: Should not be able to modify the cluster hosted dns value for GCP Platform Status + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + controlPlaneTopology: "HighlyAvailable" + infrastructureTopology: "HighlyAvailable" + platform: GCP + platformStatus: + type: GCP + gcp: + clusterHostedDNS: "Enabled" + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: GCP + platformStatus: + type: GCP + gcp: + clusterHostedDNS: "Disabled" + expectedStatusError: "status.platformStatus.gcp.clusterHostedDNS: Invalid value: \"string\": clusterHostedDNS is immutable and may only be configured during installation" + - name: Should not be able to remove GCP cluster hosted DNS from platformStatus.gcp + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: GCP + platformStatus: + type: GCP + gcp: + clusterHostedDNS: "Enabled" + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: GCP + platformStatus: + type: GCP + gcp: {} + expectedStatusError: "status.platformStatus.gcp.clusterHostedDNS: Invalid value: \"string\": clusterHostedDNS is immutable and may only be configured during installation" diff --git 
a/vendor/github.com/openshift/api/config/v1/techpreview.network.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/techpreview.network.testsuite.yaml new file mode 100644 index 0000000000..d15fae3a90 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/techpreview.network.testsuite.yaml @@ -0,0 +1,28 @@ +apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: "[TechPreviewNoUpgrade] Network" +crd: 0000_10_config-operator_01_network-TechPreviewNoUpgrade.crd.yaml +tests: + onCreate: + - name: Should be able to set status conditions + initial: | + apiVersion: config.openshift.io/v1 + kind: Network + spec: {} # No spec is required for a Network + status: + conditions: + - type: NetworkTypeMigrationInProgress + status: "False" + reason: "Reason" + message: "Message" + lastTransitionTime: "2023-10-25T12:00:00Z" + expected: | + apiVersion: config.openshift.io/v1 + kind: Network + spec: {} + status: + conditions: + - type: NetworkTypeMigrationInProgress + status: "False" + reason: "Reason" + message: "Message" + lastTransitionTime: "2023-10-25T12:00:00Z" diff --git a/vendor/github.com/openshift/api/config/v1/types.go b/vendor/github.com/openshift/api/config/v1/types.go index 56d00648ee..6fb1b9adc9 100644 --- a/vendor/github.com/openshift/api/config/v1/types.go +++ b/vendor/github.com/openshift/api/config/v1/types.go @@ -398,3 +398,33 @@ const ( // NoOpinionIncludeSubDomains means HSTS "includeSubDomains" doesn't matter to the RequiredHSTSPolicy NoOpinionIncludeSubDomains IncludeSubDomainsPolicy = "NoOpinion" ) + +// IBMCloudServiceName contains a value specifying the name of an IBM Cloud Service, +// which are used by MAPI, CIRO, CIO, Installer, etc. +// +kubebuilder:validation:Enum=CIS;COS;DNSServices;GlobalSearch;GlobalTagging;HyperProtect;IAM;KeyProtect;ResourceController;ResourceManager;VPC +type IBMCloudServiceName string + +const ( + // IBMCloudServiceCIS is the name for IBM Cloud CIS. + IBMCloudServiceCIS IBMCloudServiceName = "CIS" + // IBMCloudServiceCOS is the name for IBM Cloud COS. + IBMCloudServiceCOS IBMCloudServiceName = "COS" + // IBMCloudServiceDNSServices is the name for IBM Cloud DNS Services. + IBMCloudServiceDNSServices IBMCloudServiceName = "DNSServices" + // IBMCloudServiceGlobalSearch is the name for IBM Cloud Global Search. + IBMCloudServiceGlobalSearch IBMCloudServiceName = "GlobalSearch" + // IBMCloudServiceGlobalTagging is the name for IBM Cloud Global Tagging. + IBMCloudServiceGlobalTagging IBMCloudServiceName = "GlobalTagging" + // IBMCloudServiceHyperProtect is the name for IBM Cloud Hyper Protect. + IBMCloudServiceHyperProtect IBMCloudServiceName = "HyperProtect" + // IBMCloudServiceIAM is the name for IBM Cloud IAM. + IBMCloudServiceIAM IBMCloudServiceName = "IAM" + // IBMCloudServiceKeyProtect is the name for IBM Cloud Key Protect. + IBMCloudServiceKeyProtect IBMCloudServiceName = "KeyProtect" + // IBMCloudServiceResourceController is the name for IBM Cloud Resource Controller. + IBMCloudServiceResourceController IBMCloudServiceName = "ResourceController" + // IBMCloudServiceResourceManager is the name for IBM Cloud Resource Manager. + IBMCloudServiceResourceManager IBMCloudServiceName = "ResourceManager" + // IBMCloudServiceVPC is the name for IBM Cloud VPC. 
+ IBMCloudServiceVPC IBMCloudServiceName = "VPC" +) diff --git a/vendor/github.com/openshift/api/config/v1/types_apiserver.go b/vendor/github.com/openshift/api/config/v1/types_apiserver.go index 5d18860c3a..59b89388bd 100644 --- a/vendor/github.com/openshift/api/config/v1/types_apiserver.go +++ b/vendor/github.com/openshift/api/config/v1/types_apiserver.go @@ -54,7 +54,7 @@ type APIServerSpec struct { // // If unset, a default (which may change between releases) is chosen. Note that only Old, // Intermediate and Custom profiles are currently supported, and the maximum available - // MinTLSVersions is VersionTLS12. + // minTLSVersion is VersionTLS12. // +optional TLSSecurityProfile *TLSSecurityProfile `json:"tlsSecurityProfile,omitempty"` // audit specifies the settings for audit configuration to be applied to all OpenShift-provided diff --git a/vendor/github.com/openshift/api/config/v1/types_authentication.go b/vendor/github.com/openshift/api/config/v1/types_authentication.go index dd2ef6e0ae..b9d1e0c52a 100644 --- a/vendor/github.com/openshift/api/config/v1/types_authentication.go +++ b/vendor/github.com/openshift/api/config/v1/types_authentication.go @@ -4,7 +4,9 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // +genclient // +genclient:nonNamespaced +// +kubebuilder:subresource:status // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +openshift:validation:FeatureSetAwareXValidation:featureSet=CustomNoUpgrade;TechPreviewNoUpgrade,rule="!has(self.spec.oidcProviders) || self.spec.oidcProviders.all(p, !has(p.oidcClients) || p.oidcClients.all(specC, self.status.oidcClients.exists(statusC, statusC.componentNamespace == specC.componentNamespace && statusC.componentName == specC.componentName) || (has(oldSelf.spec.oidcProviders) && oldSelf.spec.oidcProviders.exists(oldP, oldP.name == p.name && has(oldP.oidcClients) && oldP.oidcClients.exists(oldC, oldC.componentNamespace == specC.componentNamespace && oldC.componentName == specC.componentName)))))",message="all oidcClients in the oidcProviders must match their componentName and componentNamespace to either a previously configured oidcClient or they must exist in the status.oidcClients" // Authentication specifies cluster-wide settings for authentication (like OAuth and // webhook token authenticators). The canonical name of an instance is `cluster`. @@ -50,12 +52,16 @@ type AuthenticationSpec struct { OAuthMetadata ConfigMapNameReference `json:"oauthMetadata"` // webhookTokenAuthenticators is DEPRECATED, setting it has no effect. + // +listType=atomic WebhookTokenAuthenticators []DeprecatedWebhookTokenAuthenticator `json:"webhookTokenAuthenticators,omitempty"` // webhookTokenAuthenticator configures a remote token reviewer. // These remote authentication webhooks can be used to verify bearer tokens // via the tokenreviews.authentication.k8s.io REST API. This is required to // honor bearer tokens that are provisioned by an external authentication service. + // + // Can only be set if "Type" is set to "None". + // // +optional WebhookTokenAuthenticator *WebhookTokenAuthenticator `json:"webhookTokenAuthenticator,omitempty"` @@ -69,6 +75,18 @@ type AuthenticationSpec struct { // This allows internal components to transition to use new service account issuer without service distruption. // +optional ServiceAccountIssuer string `json:"serviceAccountIssuer"` + + // OIDCProviders are OIDC identity providers that can issue tokens + // for this cluster + // Can only be set if "Type" is set to "OIDC". 
+ // + // At most one provider can be configured. + // + // +listType=map + // +listMapKey=name + // +kubebuilder:validation:MaxItems=1 + // +openshift:enable:FeatureSets=CustomNoUpgrade;TechPreviewNoUpgrade + OIDCProviders []OIDCProvider `json:"oidcProviders,omitempty"` } type AuthenticationStatus struct { @@ -87,8 +105,15 @@ type AuthenticationStatus struct { // The namespace for this config map is openshift-config-managed. IntegratedOAuthMetadata ConfigMapNameReference `json:"integratedOAuthMetadata"` - // TODO if we add support for an in-cluster operator managed Keycloak instance - // KeycloakOAuthMetadata ConfigMapNameReference `json:"keycloakOAuthMetadata"` + // OIDCClients is where participating operators place the current OIDC client status + // for OIDC clients that can be customized by the cluster-admin. + // + // +listType=map + // +listMapKey=componentNamespace + // +listMapKey=componentName + // +kubebuilder:validation:MaxItems=20 + // +openshift:enable:FeatureSets=CustomNoUpgrade;TechPreviewNoUpgrade + OIDCClients []OIDCClientStatus `json:"oidcClients"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -110,15 +135,17 @@ type AuthenticationType string const ( // None means that no cluster managed authentication system is in place. // Note that user login will only work if a manually configured system is in place and - // referenced in authentication spec via oauthMetadata and webhookTokenAuthenticators. + // referenced in authentication spec via oauthMetadata and + // webhookTokenAuthenticator/oidcProviders AuthenticationTypeNone AuthenticationType = "None" // IntegratedOAuth refers to the cluster managed OAuth server. // It is configured via the top level OAuth config. AuthenticationTypeIntegratedOAuth AuthenticationType = "IntegratedOAuth" - // TODO if we add support for an in-cluster operator managed Keycloak instance - // AuthenticationTypeKeycloak AuthenticationType = "Keycloak" + // AuthenticationTypeOIDC refers to a configuration with an external + // OIDC server configured directly with the kube-apiserver. + AuthenticationTypeOIDC AuthenticationType = "OIDC" ) // deprecatedWebhookTokenAuthenticator holds the necessary configuration options for a remote token authenticator. @@ -159,3 +186,290 @@ const ( // KubeConfigKey is the key for the kube config file data in a secret KubeConfigKey = "kubeConfig" ) + +type OIDCProvider struct { + // Name of the OIDC provider + // + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:Required + // +required + Name string `json:"name"` + // Issuer describes atributes of the OIDC token issuer + // + // +kubebuilder:validation:Required + // +required + Issuer TokenIssuer `json:"issuer"` + + // OIDCClients contains configuration for the platform's clients that + // need to request tokens from the issuer + // + // +listType=map + // +listMapKey=componentNamespace + // +listMapKey=componentName + // +kubebuilder:validation:MaxItems=20 + OIDCClients []OIDCClientConfig `json:"oidcClients"` + + // ClaimMappings describes rules on how to transform information from an + // ID token into a cluster identity + ClaimMappings TokenClaimMappings `json:"claimMappings"` + + // ClaimValidationRules are rules that are applied to validate token claims to authenticate users. 
+ // + // +listType=atomic + ClaimValidationRules []TokenClaimValidationRule `json:"claimValidationRules,omitempty"` +} + +// +kubebuilder:validation:MinLength=1 +type TokenAudience string + +type TokenIssuer struct { + // URL is the serving URL of the token issuer. + // Must use the https:// scheme. + // + // +kubebuilder:validation:Pattern=`^https:\/\/[^\s]` + // +kubebuilder:validation:Required + // +required + URL string `json:"issuerURL"` + + // Audiences is an array of audiences that the token was issued for. + // Valid tokens must include at least one of these values in their + // "aud" claim. + // Must be set to exactly one value. + // + // +listType=set + // +kubebuilder:validation:Required + // +kubebuilder:validation:MaxItems=1 + // +required + Audiences []TokenAudience `json:"audiences"` + + // CertificateAuthority is a reference to a config map in the + // configuration namespace. The .data of the configMap must contain + // the "ca-bundle.crt" key. + // If unset, system trust is used instead. + CertificateAuthority ConfigMapNameReference `json:"issuerCertificateAuthority"` +} + +type TokenClaimMappings struct { + // Username is a name of the claim that should be used to construct + // usernames for the cluster identity. + // + // Default value: "sub" + Username UsernameClaimMapping `json:"username,omitempty"` + + // Groups is a name of the claim that should be used to construct + // groups for the cluster identity. + // The referenced claim must use array of strings values. + Groups PrefixedClaimMapping `json:"groups,omitempty"` +} + +type TokenClaimMapping struct { + // Claim is a JWT token claim to be used in the mapping + // + // +kubebuilder:validation:Required + // +required + Claim string `json:"claim"` +} + +type OIDCClientConfig struct { + // ComponentName is the name of the component that is supposed to consume this + // client configuration + // + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=256 + // +kubebuilder:validation:Required + // +required + ComponentName string `json:"componentName"` + + // ComponentNamespace is the namespace of the component that is supposed to consume this + // client configuration + // + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=63 + // +kubebuilder:validation:Required + // +required + ComponentNamespace string `json:"componentNamespace"` + + // ClientID is the identifier of the OIDC client from the OIDC provider + // + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:Required + // +required + ClientID string `json:"clientID"` + + // ClientSecret refers to a secret in the `openshift-config` namespace that + // contains the client secret in the `clientSecret` key of the `.data` field + ClientSecret SecretNameReference `json:"clientSecret"` + + // ExtraScopes is an optional set of scopes to request tokens with. + // + // +listType=set + ExtraScopes []string `json:"extraScopes"` +} + +type OIDCClientStatus struct { + // ComponentName is the name of the component that will consume a client configuration. + // + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=256 + // +kubebuilder:validation:Required + // +required + ComponentName string `json:"componentName"` + + // ComponentNamespace is the namespace of the component that will consume a client configuration. 
+ // + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=63 + // +kubebuilder:validation:Required + // +required + ComponentNamespace string `json:"componentNamespace"` + + // CurrentOIDCClients is a list of clients that the component is currently using. + // + // +listType=map + // +listMapKey=issuerURL + // +listMapKey=clientID + CurrentOIDCClients []OIDCClientReference `json:"currentOIDCClients"` + + // ConsumingUsers is a slice of ServiceAccounts that need to have read + // permission on the `clientSecret` secret. + // + // +kubebuilder:validation:MaxItems=5 + // +listType=set + ConsumingUsers []ConsumingUser `json:"consumingUsers"` + + // Conditions are used to communicate the state of the `oidcClients` entry. + // + // Supported conditions include Available, Degraded and Progressing. + // + // If Available is true, the component is successfully using the configured client. + // If Degraded is true, that means something has gone wrong trying to handle the client configuration. + // If Progressing is true, that means the component is taking some action related to the `oidcClients` entry. + // + // +listType=map + // +listMapKey=type + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +type OIDCClientReference struct { + // OIDCName refers to the `name` of the provider from `oidcProviders` + // + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:Required + // +required + OIDCProviderName string `json:"oidcProviderName"` + + // URL is the serving URL of the token issuer. + // Must use the https:// scheme. + // + // +kubebuilder:validation:Pattern=`^https:\/\/[^\s]` + // +kubebuilder:validation:Required + // +required + IssuerURL string `json:"issuerURL"` + + // ClientID is the identifier of the OIDC client from the OIDC provider + // + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:Required + // +required + ClientID string `json:"clientID"` +} + +// +kubebuilder:validation:XValidation:rule="has(self.prefixPolicy) && self.prefixPolicy == 'Prefix' ? (has(self.prefix) && size(self.prefix.prefixString) > 0) : !has(self.prefix)",message="prefix must be set if prefixPolicy is 'Prefix', but must remain unset otherwise" +type UsernameClaimMapping struct { + TokenClaimMapping `json:",inline"` + + // PrefixPolicy specifies how a prefix should apply. + // + // By default, claims other than `email` will be prefixed with the issuer URL to + // prevent naming clashes with other plugins. + // + // Set to "NoPrefix" to disable prefixing. + // + // Example: + // (1) `prefix` is set to "myoidc:" and `claim` is set to "username". + // If the JWT claim `username` contains value `userA`, the resulting + // mapped value will be "myoidc:userA". + // (2) `prefix` is set to "myoidc:" and `claim` is set to "email". If the + // JWT `email` claim contains value "userA@myoidc.tld", the resulting + // mapped value will be "myoidc:userA@myoidc.tld". + // (3) `prefix` is unset, `issuerURL` is set to `https://myoidc.tld`, + // the JWT claims include "username":"userA" and "email":"userA@myoidc.tld", + // and `claim` is set to: + // (a) "username": the mapped value will be "https://myoidc.tld#userA" + // (b) "email": the mapped value will be "userA@myoidc.tld" + // + // +kubebuilder:validation:Enum={"", "NoPrefix", "Prefix"} + PrefixPolicy UsernamePrefixPolicy `json:"prefixPolicy"` + + Prefix *UsernamePrefix `json:"prefix"` +} + +type UsernamePrefixPolicy string + +var ( + // NoOpinion let's the cluster assign prefixes. 
If the username claim is email, there is no prefix + // If the username claim is anything else, it is prefixed by the issuerURL + NoOpinion UsernamePrefixPolicy = "" + + // NoPrefix means the username claim value will not have any prefix + NoPrefix UsernamePrefixPolicy = "NoPrefix" + + // Prefix means the prefix value must be specified. It cannot be empty + Prefix UsernamePrefixPolicy = "Prefix" +) + +type UsernamePrefix struct { + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +required + PrefixString string `json:"prefixString"` +} + +type PrefixedClaimMapping struct { + TokenClaimMapping `json:",inline"` + + // Prefix is a string to prefix the value from the token in the result of the + // claim mapping. + // + // By default, no prefixing occurs. + // + // Example: if `prefix` is set to "myoidc:"" and the `claim` in JWT contains + // an array of strings "a", "b" and "c", the mapping will result in an + // array of string "myoidc:a", "myoidc:b" and "myoidc:c". + Prefix string `json:"prefix"` +} + +type TokenValidationRuleType string + +const ( + TokenValidationRuleTypeRequiredClaim = "RequiredClaim" +) + +type TokenClaimValidationRule struct { + // Type sets the type of the validation rule + // + // +kubebuilder:validation:Enum={"RequiredClaim"} + // +kubebuilder:default="RequiredClaim" + Type TokenValidationRuleType `json:"type"` + + // RequiredClaim allows configuring a required claim name and its expected + // value + RequiredClaim *TokenRequiredClaim `json:"requiredClaim"` +} + +type TokenRequiredClaim struct { + // Claim is a name of a required claim. Only claims with string values are + // supported. + // + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:Required + // +required + Claim string `json:"claim"` + + // RequiredValue is the required value for the claim. + // + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:Required + // +required + RequiredValue string `json:"requiredValue"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_cluster_version.go b/vendor/github.com/openshift/api/config/v1/types_cluster_version.go index a9bade6fe7..0ceedbab45 100644 --- a/vendor/github.com/openshift/api/config/v1/types_cluster_version.go +++ b/vendor/github.com/openshift/api/config/v1/types_cluster_version.go @@ -88,9 +88,32 @@ type ClusterVersionSpec struct { // +optional Capabilities *ClusterVersionCapabilitiesSpec `json:"capabilities,omitempty"` + // signatureStores contains the upstream URIs to verify release signatures and optional + // reference to a config map by name containing the PEM-encoded CA bundle. + // + // By default, CVO will use existing signature stores if this property is empty. + // The CVO will check the release signatures in the local ConfigMaps first. It will search for a valid signature + // in these stores in parallel only when local ConfigMaps did not include a valid signature. + // Validation will fail if none of the signature stores reply with valid signature before timeout. + // Setting signatureStores will replace the default signature stores with custom signature stores. + // Default stores can be used with custom signature stores by adding them manually. + // + // A maximum of 32 signature stores may be configured. 
+ // +kubebuilder:validation:MaxItems=32 + // +openshift:enable:FeatureSets=CustomNoUpgrade;TechPreviewNoUpgrade + // +listType=map + // +listMapKey=url + // +optional + SignatureStores []SignatureStore `json:"signatureStores"` + // overrides is list of overides for components that are managed by // cluster version operator. Marking a component unmanaged will prevent // the operator from creating or updating the object. + // +listType=map + // +listMapKey=kind + // +listMapKey=group + // +listMapKey=namespace + // +listMapKey=name // +optional Overrides []ComponentOverride `json:"overrides,omitempty"` } @@ -116,6 +139,7 @@ type ClusterVersionStatus struct { // Completed if the rollout completed - if an update was failing or halfway // applied the state will be Partial. Only a limited amount of update history // is preserved. + // +listType=atomic // +optional History []UpdateHistory `json:"history,omitempty"` @@ -143,8 +167,12 @@ type ClusterVersionStatus struct { // by a temporary or permanent error. Conditions are only valid for the // current desiredUpdate when metadata.generation is equal to // status.generation. + // +listType=map + // +listMapKey=type + // +patchMergeKey=type + // +patchStrategy=merge // +optional - Conditions []ClusterOperatorStatusCondition `json:"conditions,omitempty"` + Conditions []ClusterOperatorStatusCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` // availableUpdates contains updates recommended for this // cluster. Updates which appear in conditionalUpdates but not in @@ -153,6 +181,7 @@ type ClusterVersionStatus struct { // is unavailable, or if an invalid channel has been specified. // +nullable // +kubebuilder:validation:Required + // +listType=atomic // +required AvailableUpdates []Release `json:"availableUpdates"` @@ -249,7 +278,7 @@ const ( ) // ClusterVersionCapability enumerates optional, core cluster components. -// +kubebuilder:validation:Enum=openshift-samples;baremetal;marketplace;Console;Insights;Storage;CSISnapshot;NodeTuning;MachineAPI;Build;DeploymentConfig;ImageRegistry;OperatorLifecycleManager +// +kubebuilder:validation:Enum=openshift-samples;baremetal;marketplace;Console;Insights;Storage;CSISnapshot;NodeTuning;MachineAPI;Build;DeploymentConfig;ImageRegistry;OperatorLifecycleManager;CloudCredential type ClusterVersionCapability string const ( @@ -343,6 +372,10 @@ const ( // ClusterVersionCapabilityOperatorLifecycleManager manages the Operator Lifecycle Manager // which itself manages the lifecycle of operators ClusterVersionCapabilityOperatorLifecycleManager ClusterVersionCapability = "OperatorLifecycleManager" + + // ClusterVersionCapabilityCloudCredential manages credentials for cloud providers + // in openshift cluster + ClusterVersionCapabilityCloudCredential ClusterVersionCapability = "CloudCredential" ) // KnownClusterVersionCapabilities includes all known optional, core cluster components. @@ -360,10 +393,11 @@ var KnownClusterVersionCapabilities = []ClusterVersionCapability{ ClusterVersionCapabilityDeploymentConfig, ClusterVersionCapabilityImageRegistry, ClusterVersionCapabilityOperatorLifecycleManager, + ClusterVersionCapabilityCloudCredential, } // ClusterVersionCapabilitySet defines sets of cluster version capabilities. -// +kubebuilder:validation:Enum=None;v4.11;v4.12;v4.13;v4.14;vCurrent +// +kubebuilder:validation:Enum=None;v4.11;v4.12;v4.13;v4.14;v4.15;vCurrent type ClusterVersionCapabilitySet string const ( @@ -395,6 +429,12 @@ const ( // version of OpenShift is installed. 
ClusterVersionCapabilitySet4_14 ClusterVersionCapabilitySet = "v4.14" + // ClusterVersionCapabilitySet4_15 is the recommended set of + // optional capabilities to enable for the 4.15 version of + // OpenShift. This list will remain the same no matter which + // version of OpenShift is installed. + ClusterVersionCapabilitySet4_15 ClusterVersionCapabilitySet = "v4.15" + // ClusterVersionCapabilitySetCurrent is the recommended set // of optional capabilities to enable for the cluster's // current version of OpenShift. @@ -445,6 +485,22 @@ var ClusterVersionCapabilitySets = map[ClusterVersionCapabilitySet][]ClusterVers ClusterVersionCapabilityDeploymentConfig, ClusterVersionCapabilityImageRegistry, }, + ClusterVersionCapabilitySet4_15: { + ClusterVersionCapabilityBaremetal, + ClusterVersionCapabilityConsole, + ClusterVersionCapabilityInsights, + ClusterVersionCapabilityMarketplace, + ClusterVersionCapabilityStorage, + ClusterVersionCapabilityOpenShiftSamples, + ClusterVersionCapabilityCSISnapshot, + ClusterVersionCapabilityNodeTuning, + ClusterVersionCapabilityMachineAPI, + ClusterVersionCapabilityBuild, + ClusterVersionCapabilityDeploymentConfig, + ClusterVersionCapabilityImageRegistry, + ClusterVersionCapabilityOperatorLifecycleManager, + ClusterVersionCapabilityCloudCredential, + }, ClusterVersionCapabilitySetCurrent: { ClusterVersionCapabilityBaremetal, ClusterVersionCapabilityConsole, @@ -459,6 +515,7 @@ var ClusterVersionCapabilitySets = map[ClusterVersionCapabilitySet][]ClusterVers ClusterVersionCapabilityDeploymentConfig, ClusterVersionCapabilityImageRegistry, ClusterVersionCapabilityOperatorLifecycleManager, + ClusterVersionCapabilityCloudCredential, }, } @@ -598,6 +655,7 @@ type Release struct { // channels is the set of Cincinnati channels to which the release // currently belongs. + // +listType=set // +optional Channels []string `json:"channels,omitempty"` } @@ -728,3 +786,26 @@ type ClusterVersionList struct { Items []ClusterVersion `json:"items"` } + +// SignatureStore represents the URL of custom Signature Store +type SignatureStore struct { + + // url contains the upstream custom signature store URL. + // url should be a valid absolute http/https URI of an upstream signature store as per rfc1738. + // This must be provided and cannot be empty. + // + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:XValidation:rule="isURL(self)",message="url must be a valid absolute URL" + // +kubebuilder:validation:Required + URL string `json:"url"` + + // ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. + // It is used as a trust anchor to validate the TLS certificate presented by the remote server. + // The key "ca.crt" is used to locate the data. + // If specified and the config map or expected key is not found, the signature store is not honored. + // If the specified ca data is not valid, the signature store is not honored. + // If empty, we fall back to the CA configured via Proxy, which is appended to the default system roots. + // The namespace for this config map is openshift-config. 
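As a minimal sketch (not part of the vendored change), a custom signature store could be declared on the spec as follows, assuming the conventional `configv1` alias; the URL and ConfigMap name are illustrative:

spec := configv1.ClusterVersionSpec{
	SignatureStores: []configv1.SignatureStore{{
		URL: "https://signatures.example.com/signatures",                 // hypothetical store URL
		CA:  configv1.ConfigMapNameReference{Name: "signature-store-ca"}, // optional CA bundle ConfigMap in openshift-config
	}},
}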
+ // +optional + CA ConfigMapNameReference `json:"ca"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_feature.go b/vendor/github.com/openshift/api/config/v1/types_feature.go index 4bae91db88..9ac580eac8 100644 --- a/vendor/github.com/openshift/api/config/v1/types_feature.go +++ b/vendor/github.com/openshift/api/config/v1/types_feature.go @@ -159,27 +159,41 @@ type FeatureGateEnabledDisabled struct { var FeatureSets = map[FeatureSet]*FeatureGateEnabledDisabled{ Default: defaultFeatures, CustomNoUpgrade: { - Enabled: []FeatureGateDescription{}, - Disabled: []FeatureGateDescription{}, + Enabled: []FeatureGateDescription{}, + Disabled: []FeatureGateDescription{ + disableKubeletCloudCredentialProviders, // We do not currently ship the correct config to use the external credentials provider. + }, }, TechPreviewNoUpgrade: newDefaultFeatures(). - without(validatingAdmissionPolicy). + with(validatingAdmissionPolicy). with(csiDriverSharedResource). with(nodeSwap). with(machineAPIProviderOpenStack). with(insightsConfigAPI). - with(retroactiveDefaultStorageClass). with(dynamicResourceAllocation). with(gateGatewayAPI). with(maxUnavailableStatefulSet). without(eventedPleg). with(sigstoreImageVerification). with(gcpLabelsTags). + with(gcpClusterHostedDNS). with(vSphereStaticIPs). with(routeExternalCertificate). with(automatedEtcdBackup). + with(vSphereControlPlaneMachineset). without(machineAPIOperatorDisableMachineHealthCheckController). with(adminNetworkPolicy). + with(dnsNameResolver). + with(machineConfigNodes). + with(metricsServer). + with(installAlternateInfrastructureAWS). + without(clusterAPIInstall). + with(sdnLiveMigration). + with(mixedCPUsAllocation). + with(managedBootImages). + without(disableKubeletCloudCredentialProviders). + with(onClusterBuild). + with(signatureStores). toFeatures(defaultFeatures), LatencySensitive: newDefaultFeatures(). toFeatures(defaultFeatures), @@ -199,7 +213,7 @@ var defaultFeatures = &FeatureGateEnabledDisabled{ buildCSIVolumes, }, Disabled: []FeatureGateDescription{ - retroactiveDefaultStorageClass, + disableKubeletCloudCredentialProviders, // We do not currently ship the correct config to use the external credentials provider. }, } diff --git a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go index 18d36519d1..e5a059c13c 100644 --- a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go +++ b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go @@ -1,6 +1,8 @@ package v1 -import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) // +genclient // +genclient:nonNamespaced @@ -229,6 +231,24 @@ const ( IBMCloudProviderTypeUPI IBMCloudProviderType = "UPI" ) +// ClusterHostedDNS indicates whether the cluster DNS is hosted by the cluster or Core DNS . +type ClusterHostedDNS string + +const ( + // EnabledClusterHostedDNS indicates that a DNS solution other than the default provided by the + // cloud platform is in use. In this mode, the cluster hosts a DNS solution during installation and the + // user is expected to provide their own DNS solution post-install. + // When "Enabled", the cluster will continue to use the default Load Balancers provided by the cloud + // platform. + EnabledClusterHostedDNS ClusterHostedDNS = "Enabled" + + // DisabledClusterHostedDNS indicates that the cluster is using the default DNS solution for the + // cloud platform. 
OpenShift is responsible for all the LB and DNS configuration needed for the + // cluster to be functional with no intervention from the user. To accomplish this, OpenShift + // configures the default LB and DNS solutions provided by the underlying cloud. + DisabledClusterHostedDNS ClusterHostedDNS = "Disabled" +) + // ExternalPlatformSpec holds the desired state for the generic External infrastructure provider. type ExternalPlatformSpec struct { // PlatformName holds the arbitrary string representing the infrastructure provider name, expected to be set at the installation time. @@ -610,6 +630,24 @@ type GCPPlatformStatus struct { // +optional // +openshift:enable:FeatureSets=CustomNoUpgrade;TechPreviewNoUpgrade ResourceTags []GCPResourceTag `json:"resourceTags,omitempty"` + + // clusterHostedDNS indicates the type of DNS solution in use within the cluster. Its default value of + // "Disabled" indicates that the cluster's DNS is the default provided by the cloud platform. It can be + // "Enabled" during install to bypass the configuration of the cloud default DNS. When "Enabled", the + // cluster needs to provide a self-hosted DNS solution for the cluster's installation to succeed. + // The cluster's use of the cloud's Load Balancers is unaffected by this setting. + // The value is immutable after it has been set at install time. + // Currently, there is no way for the customer to add additional DNS entries into the cluster hosted DNS. + // Enabling this functionality allows the user to start their own DNS solution outside the cluster after + // installation is complete. The customer would be responsible for configuring this custom DNS solution, + // and it can be run in addition to the in-cluster DNS solution. + // +kubebuilder:default:="Disabled" + // +default="Disabled" + // +kubebuilder:validation:Enum="Enabled";"Disabled" + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="clusterHostedDNS is immutable and may only be configured during installation" + // +optional + // +openshift:enable:FeatureSets=CustomNoUpgrade;TechPreviewNoUpgrade + ClusterHostedDNS ClusterHostedDNS `json:"clusterHostedDNS,omitempty"` } // GCPResourceLabel is a label to apply to GCP resources created for the cluster. @@ -622,7 +660,7 @@ type GCPResourceLabel struct { // +kubebuilder:validation:Required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=63 - // +kubebuilder:validation:Pattern=`^[a-z][0-9a-z_-]+$` + // +kubebuilder:validation:Pattern=`^[a-z][0-9a-z_-]{0,62}$` Key string `json:"key"` // value is the value part of the label. A label value can have a maximum of 63 characters and cannot be empty. @@ -630,7 +668,7 @@ type GCPResourceLabel struct { // +kubebuilder:validation:Required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=63 - // +kubebuilder:validation:Pattern=`^[0-9a-z_-]+$` + // +kubebuilder:validation:Pattern=`^[0-9a-z_-]{1,63}$` Value string `json:"value"` } @@ -1010,6 +1048,22 @@ type VSpherePlatformTopology struct { // +kubebuilder:validation:Pattern=`^/.*?/vm/.*?` // +optional Folder string `json:"folder,omitempty"` + + // template is the full inventory path of the virtual machine or template + // that will be cloned when creating new machines in this failure domain. + // The maximum length of the path is 2048 characters. + // + // When omitted, the template will be calculated by the control plane + // machineset operator based on the region and zone defined in + // VSpherePlatformFailureDomainSpec. 
+ // For example, for zone=zonea, region=region1, and infrastructure name=test, + // the template path would be calculated as //vm/test-rhcos-region1-zonea. + // +openshift:enable:FeatureSets=CustomNoUpgrade;TechPreviewNoUpgrade + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=2048 + // +kubebuilder:validation:Pattern=`^/.*?/vm/.*?` + // +optional + Template string `json:"template,omitempty"` } // VSpherePlatformVCenterSpec stores the vCenter connection fields. @@ -1166,15 +1220,14 @@ type VSpherePlatformStatus struct { // override existing defaults of IBM Cloud Services. type IBMCloudServiceEndpoint struct { // name is the name of the IBM Cloud service. + // Possible values are: CIS, COS, DNSServices, GlobalSearch, GlobalTagging, HyperProtect, IAM, KeyProtect, ResourceController, ResourceManager, or VPC. // For example, the IBM Cloud Private IAM service could be configured with the // service `name` of `IAM` and `url` of `https://private.iam.cloud.ibm.com` // Whereas the IBM Cloud Private VPC service for US South (Dallas) could be configured // with the service `name` of `VPC` and `url` of `https://us.south.private.iaas.cloud.ibm.com` // // +kubebuilder:validation:Required - // +kubebuilder:validation:Pattern=`^[a-zA-Z0-9-]+$` - // +kubebuilder:validation:MaxLength=32 - Name string `json:"name"` + Name IBMCloudServiceName `json:"name"` // url is fully qualified URI with scheme https, that overrides the default generated // endpoint for a client. @@ -1209,13 +1262,13 @@ type IBMCloudPlatformStatus struct { // for the cluster's base domain DNSInstanceCRN string `json:"dnsInstanceCRN,omitempty"` - // serviceEndpoints is a list of custom endpoints which will override the default - // service endpoints of an IBM Cloud service. These endpoints are consumed by + // serviceEndpoints is a list of custom endpoints which will override the default + // service endpoints of an IBM Cloud service. These endpoints are consumed by // components within the cluster to reach the respective IBM Cloud Services. - // +listType=map - // +listMapKey=name - // +optional - ServiceEndpoints []IBMCloudServiceEndpoint `json:"serviceEndpoints,omitempty"` + // +listType=map + // +listMapKey=name + // +optional + ServiceEndpoints []IBMCloudServiceEndpoint `json:"serviceEndpoints,omitempty"` } // KubevirtPlatformSpec holds the desired state of the kubevirt infrastructure provider. @@ -1402,6 +1455,75 @@ type NutanixPlatformSpec struct { // +listType=map // +listMapKey=name PrismElements []NutanixPrismElementEndpoint `json:"prismElements"` + + // failureDomains configures failure domains information for the Nutanix platform. + // When set, the failure domains defined here may be used to spread Machines across + // prism element clusters to improve fault tolerance of the cluster. + // +listType=map + // +listMapKey=name + // +optional + FailureDomains []NutanixFailureDomain `json:"failureDomains"` +} + +// NutanixFailureDomain configures failure domain information for the Nutanix platform. +type NutanixFailureDomain struct { + // name defines the unique name of a failure domain. + // Name is required and must be at most 64 characters in length. + // It must consist of only lower case alphanumeric characters and hyphens (-). + // It must start and end with an alphanumeric character. + // This value is arbitrary and is used to identify the failure domain within the platform. 
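As a minimal sketch (not part of the vendored change), an IBM Cloud service endpoint override using the new typed name could look like the following, assuming the conventional `configv1` alias and assuming IBMCloudServiceName is a string-backed type (the conversion below relies on that); the URL comes from the field documentation:

endpoint := configv1.IBMCloudServiceEndpoint{
	Name: configv1.IBMCloudServiceName("IAM"), // assumed string-backed type; "IAM" is one of the documented values
	URL:  "https://private.iam.cloud.ibm.com",
}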
+ // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=64 + // +kubebuilder:validation:Pattern=`[a-z0-9]([-a-z0-9]*[a-z0-9])?` + Name string `json:"name"` + + // cluster is to identify the cluster (the Prism Element under management of the Prism Central), + // in which the Machine's VM will be created. The cluster identifier (uuid or name) can be obtained + // from the Prism Central console or using the prism_central API. + // +kubebuilder:validation:Required + Cluster NutanixResourceIdentifier `json:"cluster"` + + // subnets holds a list of identifiers (one or more) of the cluster's network subnets + // for the Machine's VM to connect to. The subnet identifiers (uuid or name) can be + // obtained from the Prism Central console or using the prism_central API. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=1 + // +listType=map + // +listMapKey=type + Subnets []NutanixResourceIdentifier `json:"subnets"` +} + +// NutanixIdentifierType is an enumeration of different resource identifier types. +// +kubebuilder:validation:Enum:=UUID;Name +type NutanixIdentifierType string + +const ( + // NutanixIdentifierUUID is a resource identifier identifying the object by UUID. + NutanixIdentifierUUID NutanixIdentifierType = "UUID" + + // NutanixIdentifierName is a resource identifier identifying the object by Name. + NutanixIdentifierName NutanixIdentifierType = "Name" +) + +// NutanixResourceIdentifier holds the identity of a Nutanix PC resource (cluster, image, subnet, etc.) +// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'UUID' ? has(self.uuid) : !has(self.uuid)",message="uuid configuration is required when type is UUID, and forbidden otherwise" +// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'Name' ? has(self.name) : !has(self.name)",message="name configuration is required when type is Name, and forbidden otherwise" +// +union +type NutanixResourceIdentifier struct { + // type is the identifier type to use for this resource. + // +unionDiscriminator + // +kubebuilder:validation:Required + Type NutanixIdentifierType `json:"type"` + + // uuid is the UUID of the resource in the PC. It cannot be empty if the type is UUID. + // +optional + UUID *string `json:"uuid,omitempty"` + + // name is the resource name in the PC. It cannot be empty if the type is Name. + // +optional + Name *string `json:"name,omitempty"` } // NutanixPrismEndpoint holds the endpoint address and port to access the Nutanix Prism Central or Element (cluster) diff --git a/vendor/github.com/openshift/api/config/v1/types_network.go b/vendor/github.com/openshift/api/config/v1/types_network.go index c79bc8cf02..3d345b2d60 100644 --- a/vendor/github.com/openshift/api/config/v1/types_network.go +++ b/vendor/github.com/openshift/api/config/v1/types_network.go @@ -85,6 +85,18 @@ type NetworkStatus struct { // Migration contains the cluster network migration configuration. Migration *NetworkMigration `json:"migration,omitempty"` + + // conditions represents the observations of a network.config current state. 
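As a minimal sketch (not part of the vendored change), a Nutanix failure domain built from the identifier types above could look like this, assuming the conventional `configv1` alias; the UUID and subnet name are illustrative:

clusterUUID := "0005b0f1-8f43-a0f2-02b7-3cecef193712" // hypothetical Prism Element UUID
subnetName := "subnet-1"                              // hypothetical subnet name
fd := configv1.NutanixFailureDomain{
	Name: "fd-1",
	Cluster: configv1.NutanixResourceIdentifier{
		Type: configv1.NutanixIdentifierUUID,
		UUID: &clusterUUID,
	},
	Subnets: []configv1.NutanixResourceIdentifier{{
		Type: configv1.NutanixIdentifierName,
		Name: &subnetName,
	}},
}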
+ // Known .status.conditions.type are: "NetworkTypeMigrationInProgress", "NetworkTypeMigrationMTUReady", + // "NetworkTypeMigrationTargetCNIAvailable", "NetworkTypeMigrationTargetCNIInUse" + // and "NetworkTypeMigrationOriginalCNIPurged" + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + // +openshift:enable:FeatureSets=CustomNoUpgrade;TechPreviewNoUpgrade + Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` } // ClusterNetworkEntry is a contiguous block of IP addresses from which pod IPs diff --git a/vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go b/vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go index 9dbacb9966..4f69de40cf 100644 --- a/vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go +++ b/vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go @@ -56,7 +56,7 @@ type TLSSecurityProfile struct { // - AES128-SHA // - AES256-SHA // - DES-CBC3-SHA - // minTLSVersion: TLSv1.0 + // minTLSVersion: VersionTLS10 // // +optional // +nullable @@ -79,7 +79,7 @@ type TLSSecurityProfile struct { // - ECDHE-RSA-CHACHA20-POLY1305 // - DHE-RSA-AES128-GCM-SHA256 // - DHE-RSA-AES256-GCM-SHA384 - // minTLSVersion: TLSv1.2 + // minTLSVersion: VersionTLS12 // // +optional // +nullable @@ -94,7 +94,7 @@ type TLSSecurityProfile struct { // - TLS_AES_128_GCM_SHA256 // - TLS_AES_256_GCM_SHA384 // - TLS_CHACHA20_POLY1305_SHA256 - // minTLSVersion: TLSv1.3 + // minTLSVersion: VersionTLS13 // // NOTE: Currently unsupported. // @@ -110,7 +110,7 @@ type TLSSecurityProfile struct { // - ECDHE-RSA-CHACHA20-POLY1305 // - ECDHE-RSA-AES128-GCM-SHA256 // - ECDHE-ECDSA-AES128-GCM-SHA256 - // minTLSVersion: TLSv1.1 + // minTLSVersion: VersionTLS11 // // +optional // +nullable @@ -167,7 +167,7 @@ type TLSProfileSpec struct { // that is negotiated during the TLS handshake. 
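As a minimal sketch (not part of the vendored change), a custom TLS profile spec written with the corrected VersionTLS constant spelling could look as follows, assuming the conventional `configv1` alias; the cipher list is illustrative:

customProfile := configv1.TLSProfileSpec{
	Ciphers: []string{
		"ECDHE-ECDSA-AES256-GCM-SHA384", // illustrative OpenSSL-style cipher names
		"ECDHE-RSA-AES256-GCM-SHA384",
	},
	MinTLSVersion: configv1.VersionTLS12, // per the surrounding docs, the highest minTLSVersion currently allowed
}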
For example, to use TLS // versions 1.1, 1.2 and 1.3 (yaml): // - // minTLSVersion: TLSv1.1 + // minTLSVersion: VersionTLS11 // // NOTE: currently the highest minTLSVersion allowed is VersionTLS12 // diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go index 63b9f050d0..476602adef 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go @@ -453,7 +453,7 @@ func (in *Authentication) DeepCopyInto(out *Authentication) { out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) - out.Status = in.Status + in.Status.DeepCopyInto(&out.Status) return } @@ -522,6 +522,13 @@ func (in *AuthenticationSpec) DeepCopyInto(out *AuthenticationSpec) { *out = new(WebhookTokenAuthenticator) **out = **in } + if in.OIDCProviders != nil { + in, out := &in.OIDCProviders, &out.OIDCProviders + *out = make([]OIDCProvider, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -539,6 +546,13 @@ func (in *AuthenticationSpec) DeepCopy() *AuthenticationSpec { func (in *AuthenticationStatus) DeepCopyInto(out *AuthenticationStatus) { *out = *in out.IntegratedOAuthMetadata = in.IntegratedOAuthMetadata + if in.OIDCClients != nil { + in, out := &in.OIDCClients, &out.OIDCClients + *out = make([]OIDCClientStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -1177,6 +1191,11 @@ func (in *ClusterVersionSpec) DeepCopyInto(out *ClusterVersionSpec) { *out = new(ClusterVersionCapabilitiesSpec) (*in).DeepCopyInto(*out) } + if in.SignatureStores != nil { + in, out := &in.SignatureStores, &out.SignatureStores + *out = make([]SignatureStore, len(*in)) + copy(*out, *in) + } if in.Overrides != nil { in, out := &in.Overrides, &out.Overrides *out = make([]ComponentOverride, len(*in)) @@ -3604,6 +3623,13 @@ func (in *NetworkStatus) DeepCopyInto(out *NetworkStatus) { *out = new(NetworkMigration) (*in).DeepCopyInto(*out) } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -3710,6 +3736,30 @@ func (in *NodeStatus) DeepCopy() *NodeStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NutanixFailureDomain) DeepCopyInto(out *NutanixFailureDomain) { + *out = *in + in.Cluster.DeepCopyInto(&out.Cluster) + if in.Subnets != nil { + in, out := &in.Subnets, &out.Subnets + *out = make([]NutanixResourceIdentifier, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NutanixFailureDomain. +func (in *NutanixFailureDomain) DeepCopy() *NutanixFailureDomain { + if in == nil { + return nil + } + out := new(NutanixFailureDomain) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *NutanixPlatformLoadBalancer) DeepCopyInto(out *NutanixPlatformLoadBalancer) { *out = *in @@ -3735,6 +3785,13 @@ func (in *NutanixPlatformSpec) DeepCopyInto(out *NutanixPlatformSpec) { *out = make([]NutanixPrismElementEndpoint, len(*in)) copy(*out, *in) } + if in.FailureDomains != nil { + in, out := &in.FailureDomains, &out.FailureDomains + *out = make([]NutanixFailureDomain, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -3812,6 +3869,32 @@ func (in *NutanixPrismEndpoint) DeepCopy() *NutanixPrismEndpoint { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NutanixResourceIdentifier) DeepCopyInto(out *NutanixResourceIdentifier) { + *out = *in + if in.UUID != nil { + in, out := &in.UUID, &out.UUID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NutanixResourceIdentifier. +func (in *NutanixResourceIdentifier) DeepCopy() *NutanixResourceIdentifier { + if in == nil { + return nil + } + out := new(NutanixResourceIdentifier) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *OAuth) DeepCopyInto(out *OAuth) { *out = *in @@ -3952,6 +4035,109 @@ func (in *OAuthTemplates) DeepCopy() *OAuthTemplates { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OIDCClientConfig) DeepCopyInto(out *OIDCClientConfig) { + *out = *in + out.ClientSecret = in.ClientSecret + if in.ExtraScopes != nil { + in, out := &in.ExtraScopes, &out.ExtraScopes + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OIDCClientConfig. +func (in *OIDCClientConfig) DeepCopy() *OIDCClientConfig { + if in == nil { + return nil + } + out := new(OIDCClientConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OIDCClientReference) DeepCopyInto(out *OIDCClientReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OIDCClientReference. +func (in *OIDCClientReference) DeepCopy() *OIDCClientReference { + if in == nil { + return nil + } + out := new(OIDCClientReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OIDCClientStatus) DeepCopyInto(out *OIDCClientStatus) { + *out = *in + if in.CurrentOIDCClients != nil { + in, out := &in.CurrentOIDCClients, &out.CurrentOIDCClients + *out = make([]OIDCClientReference, len(*in)) + copy(*out, *in) + } + if in.ConsumingUsers != nil { + in, out := &in.ConsumingUsers, &out.ConsumingUsers + *out = make([]ConsumingUser, len(*in)) + copy(*out, *in) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OIDCClientStatus. 
+func (in *OIDCClientStatus) DeepCopy() *OIDCClientStatus { + if in == nil { + return nil + } + out := new(OIDCClientStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OIDCProvider) DeepCopyInto(out *OIDCProvider) { + *out = *in + in.Issuer.DeepCopyInto(&out.Issuer) + if in.OIDCClients != nil { + in, out := &in.OIDCClients, &out.OIDCClients + *out = make([]OIDCClientConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.ClaimMappings.DeepCopyInto(&out.ClaimMappings) + if in.ClaimValidationRules != nil { + in, out := &in.ClaimValidationRules, &out.ClaimValidationRules + *out = make([]TokenClaimValidationRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OIDCProvider. +func (in *OIDCProvider) DeepCopy() *OIDCProvider { + if in == nil { + return nil + } + out := new(OIDCProvider) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ObjectReference) DeepCopyInto(out *ObjectReference) { *out = *in @@ -4526,6 +4712,23 @@ func (in *PowerVSServiceEndpoint) DeepCopy() *PowerVSServiceEndpoint { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrefixedClaimMapping) DeepCopyInto(out *PrefixedClaimMapping) { + *out = *in + out.TokenClaimMapping = in.TokenClaimMapping + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrefixedClaimMapping. +func (in *PrefixedClaimMapping) DeepCopy() *PrefixedClaimMapping { + if in == nil { + return nil + } + out := new(PrefixedClaimMapping) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Project) DeepCopyInto(out *Project) { *out = *in @@ -5054,6 +5257,23 @@ func (in *ServingInfo) DeepCopy() *ServingInfo { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SignatureStore) DeepCopyInto(out *SignatureStore) { + *out = *in + out.CA = in.CA + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SignatureStore. +func (in *SignatureStore) DeepCopy() *SignatureStore { + if in == nil { + return nil + } + out := new(SignatureStore) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *StringSource) DeepCopyInto(out *StringSource) { *out = *in @@ -5160,6 +5380,61 @@ func (in *TemplateReference) DeepCopy() *TemplateReference { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenClaimMapping) DeepCopyInto(out *TokenClaimMapping) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenClaimMapping. 
+func (in *TokenClaimMapping) DeepCopy() *TokenClaimMapping { + if in == nil { + return nil + } + out := new(TokenClaimMapping) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenClaimMappings) DeepCopyInto(out *TokenClaimMappings) { + *out = *in + in.Username.DeepCopyInto(&out.Username) + out.Groups = in.Groups + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenClaimMappings. +func (in *TokenClaimMappings) DeepCopy() *TokenClaimMappings { + if in == nil { + return nil + } + out := new(TokenClaimMappings) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenClaimValidationRule) DeepCopyInto(out *TokenClaimValidationRule) { + *out = *in + if in.RequiredClaim != nil { + in, out := &in.RequiredClaim, &out.RequiredClaim + *out = new(TokenRequiredClaim) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenClaimValidationRule. +func (in *TokenClaimValidationRule) DeepCopy() *TokenClaimValidationRule { + if in == nil { + return nil + } + out := new(TokenClaimValidationRule) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TokenConfig) DeepCopyInto(out *TokenConfig) { *out = *in @@ -5181,6 +5456,44 @@ func (in *TokenConfig) DeepCopy() *TokenConfig { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenIssuer) DeepCopyInto(out *TokenIssuer) { + *out = *in + if in.Audiences != nil { + in, out := &in.Audiences, &out.Audiences + *out = make([]TokenAudience, len(*in)) + copy(*out, *in) + } + out.CertificateAuthority = in.CertificateAuthority + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenIssuer. +func (in *TokenIssuer) DeepCopy() *TokenIssuer { + if in == nil { + return nil + } + out := new(TokenIssuer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenRequiredClaim) DeepCopyInto(out *TokenRequiredClaim) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenRequiredClaim. +func (in *TokenRequiredClaim) DeepCopy() *TokenRequiredClaim { + if in == nil { + return nil + } + out := new(TokenRequiredClaim) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Update) DeepCopyInto(out *Update) { *out = *in @@ -5218,6 +5531,44 @@ func (in *UpdateHistory) DeepCopy() *UpdateHistory { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UsernameClaimMapping) DeepCopyInto(out *UsernameClaimMapping) { + *out = *in + out.TokenClaimMapping = in.TokenClaimMapping + if in.Prefix != nil { + in, out := &in.Prefix, &out.Prefix + *out = new(UsernamePrefix) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UsernameClaimMapping. 
+func (in *UsernameClaimMapping) DeepCopy() *UsernameClaimMapping { + if in == nil { + return nil + } + out := new(UsernameClaimMapping) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UsernamePrefix) DeepCopyInto(out *UsernamePrefix) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UsernamePrefix. +func (in *UsernamePrefix) DeepCopy() *UsernamePrefix { + if in == nil { + return nil + } + out := new(UsernamePrefix) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VSpherePlatformFailureDomainSpec) DeepCopyInto(out *VSpherePlatformFailureDomainSpec) { *out = *in diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go index 048c37b16f..3ea0a78cf0 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go @@ -316,7 +316,7 @@ var map_APIServerSpec = map[string]string{ "clientCA": "clientCA references a ConfigMap containing a certificate bundle for the signers that will be recognized for incoming client certificates in addition to the operator managed signers. If this is empty, then only operator managed signers are valid. You usually only have to set this if you have your own PKI you wish to honor client certificates from. The ConfigMap must exist in the openshift-config namespace and contain the following required fields: - ConfigMap.Data[\"ca-bundle.crt\"] - CA bundle.", "additionalCORSAllowedOrigins": "additionalCORSAllowedOrigins lists additional, user-defined regular expressions describing hosts for which the API server allows access using the CORS headers. This may be needed to access the API and the integrated OAuth server from JavaScript applications. The values are regular expressions that correspond to the Golang regular expression language.", "encryption": "encryption allows the configuration of encryption of resources at the datastore layer.", - "tlsSecurityProfile": "tlsSecurityProfile specifies settings for TLS connections for externally exposed servers.\n\nIf unset, a default (which may change between releases) is chosen. Note that only Old, Intermediate and Custom profiles are currently supported, and the maximum available MinTLSVersions is VersionTLS12.", + "tlsSecurityProfile": "tlsSecurityProfile specifies settings for TLS connections for externally exposed servers.\n\nIf unset, a default (which may change between releases) is chosen. Note that only Old, Intermediate and Custom profiles are currently supported, and the maximum available minTLSVersion is VersionTLS12.", "audit": "audit specifies the settings for audit configuration to be applied to all OpenShift-provided API servers in the cluster.", } @@ -367,8 +367,9 @@ var map_AuthenticationSpec = map[string]string{ "type": "type identifies the cluster managed, user facing authentication mode in use. Specifically, it manages the component that responds to login attempts. The default is IntegratedOAuth.", "oauthMetadata": "oauthMetadata contains the discovery endpoint data for OAuth 2.0 Authorization Server Metadata for an external OAuth server. 
This discovery document can be viewed from its served location: oc get --raw '/.well-known/oauth-authorization-server' For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 If oauthMetadata.name is non-empty, this value has precedence over any metadata reference stored in status. The key \"oauthMetadata\" is used to locate the data. If specified and the config map or expected key is not found, no metadata is served. If the specified metadata is not valid, no metadata is served. The namespace for this config map is openshift-config.", "webhookTokenAuthenticators": "webhookTokenAuthenticators is DEPRECATED, setting it has no effect.", - "webhookTokenAuthenticator": "webhookTokenAuthenticator configures a remote token reviewer. These remote authentication webhooks can be used to verify bearer tokens via the tokenreviews.authentication.k8s.io REST API. This is required to honor bearer tokens that are provisioned by an external authentication service.", + "webhookTokenAuthenticator": "webhookTokenAuthenticator configures a remote token reviewer. These remote authentication webhooks can be used to verify bearer tokens via the tokenreviews.authentication.k8s.io REST API. This is required to honor bearer tokens that are provisioned by an external authentication service.\n\nCan only be set if \"Type\" is set to \"None\".", "serviceAccountIssuer": "serviceAccountIssuer is the identifier of the bound service account token issuer. The default is https://kubernetes.default.svc WARNING: Updating this field will not result in immediate invalidation of all bound tokens with the previous issuer value. Instead, the tokens issued by previous service account issuer will continue to be trusted for a time period chosen by the platform (currently set to 24h). This time period is subject to change over time. This allows internal components to transition to use new service account issuer without service distruption.", + "oidcProviders": "OIDCProviders are OIDC identity providers that can issue tokens for this cluster Can only be set if \"Type\" is set to \"OIDC\".\n\nAt most one provider can be configured.", } func (AuthenticationSpec) SwaggerDoc() map[string]string { @@ -377,6 +378,7 @@ func (AuthenticationSpec) SwaggerDoc() map[string]string { var map_AuthenticationStatus = map[string]string{ "integratedOAuthMetadata": "integratedOAuthMetadata contains the discovery endpoint data for OAuth 2.0 Authorization Server Metadata for the in-cluster integrated OAuth server. This discovery document can be viewed from its served location: oc get --raw '/.well-known/oauth-authorization-server' For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 This contains the observed value based on cluster state. An explicitly set value in spec.oauthMetadata has precedence over this field. This field has no meaning if authentication spec.type is not set to IntegratedOAuth. The key \"oauthMetadata\" is used to locate the data. If the config map or expected key is not found, no metadata is served. If the specified metadata is not valid, no metadata is served. 
The namespace for this config map is openshift-config-managed.", + "oidcClients": "OIDCClients is where participating operators place the current OIDC client status for OIDC clients that can be customized by the cluster-admin.", } func (AuthenticationStatus) SwaggerDoc() map[string]string { @@ -392,6 +394,113 @@ func (DeprecatedWebhookTokenAuthenticator) SwaggerDoc() map[string]string { return map_DeprecatedWebhookTokenAuthenticator } +var map_OIDCClientConfig = map[string]string{ + "componentName": "ComponentName is the name of the component that is supposed to consume this client configuration", + "componentNamespace": "ComponentNamespace is the namespace of the component that is supposed to consume this client configuration", + "clientID": "ClientID is the identifier of the OIDC client from the OIDC provider", + "clientSecret": "ClientSecret refers to a secret in the `openshift-config` namespace that contains the client secret in the `clientSecret` key of the `.data` field", + "extraScopes": "ExtraScopes is an optional set of scopes to request tokens with.", +} + +func (OIDCClientConfig) SwaggerDoc() map[string]string { + return map_OIDCClientConfig +} + +var map_OIDCClientReference = map[string]string{ + "oidcProviderName": "OIDCName refers to the `name` of the provider from `oidcProviders`", + "issuerURL": "URL is the serving URL of the token issuer. Must use the https:// scheme.", + "clientID": "ClientID is the identifier of the OIDC client from the OIDC provider", +} + +func (OIDCClientReference) SwaggerDoc() map[string]string { + return map_OIDCClientReference +} + +var map_OIDCClientStatus = map[string]string{ + "componentName": "ComponentName is the name of the component that will consume a client configuration.", + "componentNamespace": "ComponentNamespace is the namespace of the component that will consume a client configuration.", + "currentOIDCClients": "CurrentOIDCClients is a list of clients that the component is currently using.", + "consumingUsers": "ConsumingUsers is a slice of ServiceAccounts that need to have read permission on the `clientSecret` secret.", + "conditions": "Conditions are used to communicate the state of the `oidcClients` entry.\n\nSupported conditions include Available, Degraded and Progressing.\n\nIf Available is true, the component is successfully using the configured client. If Degraded is true, that means something has gone wrong trying to handle the client configuration. 
If Progressing is true, that means the component is taking some action related to the `oidcClients` entry.", +} + +func (OIDCClientStatus) SwaggerDoc() map[string]string { + return map_OIDCClientStatus +} + +var map_OIDCProvider = map[string]string{ + "name": "Name of the OIDC provider", + "issuer": "Issuer describes atributes of the OIDC token issuer", + "oidcClients": "OIDCClients contains configuration for the platform's clients that need to request tokens from the issuer", + "claimMappings": "ClaimMappings describes rules on how to transform information from an ID token into a cluster identity", + "claimValidationRules": "ClaimValidationRules are rules that are applied to validate token claims to authenticate users.", +} + +func (OIDCProvider) SwaggerDoc() map[string]string { + return map_OIDCProvider +} + +var map_PrefixedClaimMapping = map[string]string{ + "prefix": "Prefix is a string to prefix the value from the token in the result of the claim mapping.\n\nBy default, no prefixing occurs.\n\nExample: if `prefix` is set to \"myoidc:\"\" and the `claim` in JWT contains an array of strings \"a\", \"b\" and \"c\", the mapping will result in an array of string \"myoidc:a\", \"myoidc:b\" and \"myoidc:c\".", +} + +func (PrefixedClaimMapping) SwaggerDoc() map[string]string { + return map_PrefixedClaimMapping +} + +var map_TokenClaimMapping = map[string]string{ + "claim": "Claim is a JWT token claim to be used in the mapping", +} + +func (TokenClaimMapping) SwaggerDoc() map[string]string { + return map_TokenClaimMapping +} + +var map_TokenClaimMappings = map[string]string{ + "username": "Username is a name of the claim that should be used to construct usernames for the cluster identity.\n\nDefault value: \"sub\"", + "groups": "Groups is a name of the claim that should be used to construct groups for the cluster identity. The referenced claim must use array of strings values.", +} + +func (TokenClaimMappings) SwaggerDoc() map[string]string { + return map_TokenClaimMappings +} + +var map_TokenClaimValidationRule = map[string]string{ + "type": "Type sets the type of the validation rule", + "requiredClaim": "RequiredClaim allows configuring a required claim name and its expected value", +} + +func (TokenClaimValidationRule) SwaggerDoc() map[string]string { + return map_TokenClaimValidationRule +} + +var map_TokenIssuer = map[string]string{ + "issuerURL": "URL is the serving URL of the token issuer. Must use the https:// scheme.", + "audiences": "Audiences is an array of audiences that the token was issued for. Valid tokens must include at least one of these values in their \"aud\" claim. Must be set to exactly one value.", + "issuerCertificateAuthority": "CertificateAuthority is a reference to a config map in the configuration namespace. The .data of the configMap must contain the \"ca-bundle.crt\" key. If unset, system trust is used instead.", +} + +func (TokenIssuer) SwaggerDoc() map[string]string { + return map_TokenIssuer +} + +var map_TokenRequiredClaim = map[string]string{ + "claim": "Claim is a name of a required claim. 
Only claims with string values are supported.", + "requiredValue": "RequiredValue is the required value for the claim.", +} + +func (TokenRequiredClaim) SwaggerDoc() map[string]string { + return map_TokenRequiredClaim +} + +var map_UsernameClaimMapping = map[string]string{ + "prefixPolicy": "PrefixPolicy specifies how a prefix should apply.\n\nBy default, claims other than `email` will be prefixed with the issuer URL to prevent naming clashes with other plugins.\n\nSet to \"NoPrefix\" to disable prefixing.\n\nExample:\n (1) `prefix` is set to \"myoidc:\" and `claim` is set to \"username\".\n If the JWT claim `username` contains value `userA`, the resulting\n mapped value will be \"myoidc:userA\".\n (2) `prefix` is set to \"myoidc:\" and `claim` is set to \"email\". If the\n JWT `email` claim contains value \"userA@myoidc.tld\", the resulting\n mapped value will be \"myoidc:userA@myoidc.tld\".\n (3) `prefix` is unset, `issuerURL` is set to `https://myoidc.tld`,\n the JWT claims include \"username\":\"userA\" and \"email\":\"userA@myoidc.tld\",\n and `claim` is set to:\n (a) \"username\": the mapped value will be \"https://myoidc.tld#userA\"\n (b) \"email\": the mapped value will be \"userA@myoidc.tld\"", +} + +func (UsernameClaimMapping) SwaggerDoc() map[string]string { + return map_UsernameClaimMapping +} + var map_WebhookTokenAuthenticator = map[string]string{ "": "webhookTokenAuthenticator holds the necessary configuration options for a remote token authenticator", "kubeConfig": "kubeConfig references a secret that contains kube config file data which describes how to access the remote webhook service. The namespace for the referenced secret is openshift-config.\n\nFor further details, see:\n\nhttps://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication\n\nThe key \"kubeConfig\" is used to locate the data. If the secret or expected key is not found, the webhook is not honored. If the specified kube config data is not valid, the webhook is not honored.", @@ -587,13 +696,14 @@ func (ClusterVersionList) SwaggerDoc() map[string]string { } var map_ClusterVersionSpec = map[string]string{ - "": "ClusterVersionSpec is the desired version state of the cluster. It includes the version the cluster should be at, how the cluster is identified, and where the cluster should look for version updates.", - "clusterID": "clusterID uniquely identifies this cluster. This is expected to be an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx in hexadecimal values). This is a required field.", - "desiredUpdate": "desiredUpdate is an optional field that indicates the desired value of the cluster version. Setting this value will trigger an upgrade (if the current version does not match the desired version). The set of recommended update values is listed as part of available updates in status, and setting values outside that range may cause the upgrade to fail.\n\nSome of the fields are inter-related with restrictions and meanings described here. 1. image is specified, version is specified, architecture is specified. API validation error. 2. image is specified, version is specified, architecture is not specified. You should not do this. version is silently ignored and image is used. 3. image is specified, version is not specified, architecture is specified. API validation error. 4. image is specified, version is not specified, architecture is not specified. image is used. 5. image is not specified, version is specified, architecture is specified. 
version and desired architecture are used to select an image. 6. image is not specified, version is specified, architecture is not specified. version and current architecture are used to select an image. 7. image is not specified, version is not specified, architecture is specified. API validation error. 8. image is not specified, version is not specified, architecture is not specified. API validation error.\n\nIf an upgrade fails the operator will halt and report status about the failing component. Setting the desired update value back to the previous version will cause a rollback to be attempted. Not all rollbacks will succeed.", - "upstream": "upstream may be used to specify the preferred update server. By default it will use the appropriate update server for the cluster and region.", - "channel": "channel is an identifier for explicitly requesting that a non-default set of updates be applied to this cluster. The default channel will be contain stable updates that are appropriate for production clusters.", - "capabilities": "capabilities configures the installation of optional, core cluster components. A null value here is identical to an empty object; see the child properties for default semantics.", - "overrides": "overrides is list of overides for components that are managed by cluster version operator. Marking a component unmanaged will prevent the operator from creating or updating the object.", + "": "ClusterVersionSpec is the desired version state of the cluster. It includes the version the cluster should be at, how the cluster is identified, and where the cluster should look for version updates.", + "clusterID": "clusterID uniquely identifies this cluster. This is expected to be an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx in hexadecimal values). This is a required field.", + "desiredUpdate": "desiredUpdate is an optional field that indicates the desired value of the cluster version. Setting this value will trigger an upgrade (if the current version does not match the desired version). The set of recommended update values is listed as part of available updates in status, and setting values outside that range may cause the upgrade to fail.\n\nSome of the fields are inter-related with restrictions and meanings described here. 1. image is specified, version is specified, architecture is specified. API validation error. 2. image is specified, version is specified, architecture is not specified. You should not do this. version is silently ignored and image is used. 3. image is specified, version is not specified, architecture is specified. API validation error. 4. image is specified, version is not specified, architecture is not specified. image is used. 5. image is not specified, version is specified, architecture is specified. version and desired architecture are used to select an image. 6. image is not specified, version is specified, architecture is not specified. version and current architecture are used to select an image. 7. image is not specified, version is not specified, architecture is specified. API validation error. 8. image is not specified, version is not specified, architecture is not specified. API validation error.\n\nIf an upgrade fails the operator will halt and report status about the failing component. Setting the desired update value back to the previous version will cause a rollback to be attempted. Not all rollbacks will succeed.", + "upstream": "upstream may be used to specify the preferred update server. 
By default it will use the appropriate update server for the cluster and region.", + "channel": "channel is an identifier for explicitly requesting that a non-default set of updates be applied to this cluster. The default channel will be contain stable updates that are appropriate for production clusters.", + "capabilities": "capabilities configures the installation of optional, core cluster components. A null value here is identical to an empty object; see the child properties for default semantics.", + "signatureStores": "signatureStores contains the upstream URIs to verify release signatures and optional reference to a config map by name containing the PEM-encoded CA bundle.\n\nBy default, CVO will use existing signature stores if this property is empty. The CVO will check the release signatures in the local ConfigMaps first. It will search for a valid signature in these stores in parallel only when local ConfigMaps did not include a valid signature. Validation will fail if none of the signature stores reply with valid signature before timeout. Setting signatureStores will replace the default signature stores with custom signature stores. Default stores can be used with custom signature stores by adding them manually.\n\nA maximum of 32 signature stores may be configured.", + "overrides": "overrides is list of overides for components that are managed by cluster version operator. Marking a component unmanaged will prevent the operator from creating or updating the object.", } func (ClusterVersionSpec) SwaggerDoc() map[string]string { @@ -673,6 +783,16 @@ func (Release) SwaggerDoc() map[string]string { return map_Release } +var map_SignatureStore = map[string]string{ + "": "SignatureStore represents the URL of custom Signature Store", + "url": "url contains the upstream custom signature store URL. url should be a valid absolute http/https URI of an upstream signature store as per rfc1738. This must be provided and cannot be empty.", + "ca": "ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key \"ca.crt\" is used to locate the data. If specified and the config map or expected key is not found, the signature store is not honored. If the specified ca data is not valid, the signature store is not honored. If empty, we fall back to the CA configured via Proxy, which is appended to the default system roots. The namespace for this config map is openshift-config.", +} + +func (SignatureStore) SwaggerDoc() map[string]string { + return map_SignatureStore +} + var map_Update = map[string]string{ "": "Update represents an administrator update request.", "architecture": "architecture is an optional field that indicates the desired value of the cluster architecture. In this context cluster architecture means either a single architecture or a multi architecture. architecture can only be set to Multi thereby only allowing updates from single to multi architecture. If architecture is set, image cannot be set and version must be set. 
Valid values are 'Multi' and empty.", @@ -1237,11 +1357,12 @@ func (GCPPlatformSpec) SwaggerDoc() map[string]string { } var map_GCPPlatformStatus = map[string]string{ - "": "GCPPlatformStatus holds the current status of the Google Cloud Platform infrastructure provider.", - "projectID": "resourceGroupName is the Project ID for new GCP resources created for the cluster.", - "region": "region holds the region for new GCP resources created for the cluster.", - "resourceLabels": "resourceLabels is a list of additional labels to apply to GCP resources created for the cluster. See https://cloud.google.com/compute/docs/labeling-resources for information on labeling GCP resources. GCP supports a maximum of 64 labels per resource. OpenShift reserves 32 labels for internal use, allowing 32 labels for user configuration.", - "resourceTags": "resourceTags is a list of additional tags to apply to GCP resources created for the cluster. See https://cloud.google.com/resource-manager/docs/tags/tags-overview for information on tagging GCP resources. GCP supports a maximum of 50 tags per resource.", + "": "GCPPlatformStatus holds the current status of the Google Cloud Platform infrastructure provider.", + "projectID": "resourceGroupName is the Project ID for new GCP resources created for the cluster.", + "region": "region holds the region for new GCP resources created for the cluster.", + "resourceLabels": "resourceLabels is a list of additional labels to apply to GCP resources created for the cluster. See https://cloud.google.com/compute/docs/labeling-resources for information on labeling GCP resources. GCP supports a maximum of 64 labels per resource. OpenShift reserves 32 labels for internal use, allowing 32 labels for user configuration.", + "resourceTags": "resourceTags is a list of additional tags to apply to GCP resources created for the cluster. See https://cloud.google.com/resource-manager/docs/tags/tags-overview for information on tagging GCP resources. GCP supports a maximum of 50 tags per resource.", + "clusterHostedDNS": "clusterHostedDNS indicates the type of DNS solution in use within the cluster. Its default value of \"Disabled\" indicates that the cluster's DNS is the default provided by the cloud platform. It can be \"Enabled\" during install to bypass the configuration of the cloud default DNS. When \"Enabled\", the cluster needs to provide a self-hosted DNS solution for the cluster's installation to succeed. The cluster's use of the cloud's Load Balancers is unaffected by this setting. The value is immutable after it has been set at install time. Currently, there is no way for the customer to add additional DNS entries into the cluster hosted DNS. Enabling this functionality allows the user to start their own DNS solution outside the cluster after installation is complete. The customer would be responsible for configuring this custom DNS solution, and it can be run in addition to the in-cluster DNS solution.", } func (GCPPlatformStatus) SwaggerDoc() map[string]string { @@ -1293,7 +1414,7 @@ func (IBMCloudPlatformStatus) SwaggerDoc() map[string]string { var map_IBMCloudServiceEndpoint = map[string]string{ "": "IBMCloudServiceEndpoint stores the configuration of a custom url to override existing defaults of IBM Cloud Services.", - "name": "name is the name of the IBM Cloud service. 
For example, the IBM Cloud Private IAM service could be configured with the service `name` of `IAM` and `url` of `https://private.iam.cloud.ibm.com` Whereas the IBM Cloud Private VPC service for US South (Dallas) could be configured with the service `name` of `VPC` and `url` of `https://us.south.private.iaas.cloud.ibm.com`", + "name": "name is the name of the IBM Cloud service. Possible values are: CIS, COS, DNSServices, GlobalSearch, GlobalTagging, HyperProtect, IAM, KeyProtect, ResourceController, ResourceManager, or VPC. For example, the IBM Cloud Private IAM service could be configured with the service `name` of `IAM` and `url` of `https://private.iam.cloud.ibm.com` Whereas the IBM Cloud Private VPC service for US South (Dallas) could be configured with the service `name` of `VPC` and `url` of `https://us.south.private.iaas.cloud.ibm.com`", "url": "url is fully qualified URI with scheme https, that overrides the default generated endpoint for a client. This must be provided and cannot be empty.", } @@ -1366,6 +1487,17 @@ func (KubevirtPlatformStatus) SwaggerDoc() map[string]string { return map_KubevirtPlatformStatus } +var map_NutanixFailureDomain = map[string]string{ + "": "NutanixFailureDomain configures failure domain information for the Nutanix platform.", + "name": "name defines the unique name of a failure domain. Name is required and must be at most 64 characters in length. It must consist of only lower case alphanumeric characters and hyphens (-). It must start and end with an alphanumeric character. This value is arbitrary and is used to identify the failure domain within the platform.", + "cluster": "cluster is to identify the cluster (the Prism Element under management of the Prism Central), in which the Machine's VM will be created. The cluster identifier (uuid or name) can be obtained from the Prism Central console or using the prism_central API.", + "subnets": "subnets holds a list of identifiers (one or more) of the cluster's network subnets for the Machine's VM to connect to. The subnet identifiers (uuid or name) can be obtained from the Prism Central console or using the prism_central API.", +} + +func (NutanixFailureDomain) SwaggerDoc() map[string]string { + return map_NutanixFailureDomain +} + var map_NutanixPlatformLoadBalancer = map[string]string{ "": "NutanixPlatformLoadBalancer defines the load balancer used by the cluster on Nutanix platform.", "type": "type defines the type of load balancer used by the cluster on Nutanix platform which can be a user-managed or openshift-managed load balancer that is to be used for the OpenShift API and Ingress endpoints. When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing defined in the machine config operator will be deployed. When set to UserManaged these static pods will not be deployed and it is expected that the load balancer is configured out of band by the deployer. When omitted, this means no opinion and the platform is left to choose a reasonable default. The default value is OpenShiftManagedDefault.", @@ -1376,9 +1508,10 @@ func (NutanixPlatformLoadBalancer) SwaggerDoc() map[string]string { } var map_NutanixPlatformSpec = map[string]string{ - "": "NutanixPlatformSpec holds the desired state of the Nutanix infrastructure provider. This only includes fields that can be modified in the cluster.", - "prismCentral": "prismCentral holds the endpoint address and port to access the Nutanix Prism Central. 
When a cluster-wide proxy is installed, by default, this endpoint will be accessed via the proxy. Should you wish for communication with this endpoint not to be proxied, please add the endpoint to the proxy spec.noProxy list.", - "prismElements": "prismElements holds one or more endpoint address and port data to access the Nutanix Prism Elements (clusters) of the Nutanix Prism Central. Currently we only support one Prism Element (cluster) for an OpenShift cluster, where all the Nutanix resources (VMs, subnets, volumes, etc.) used in the OpenShift cluster are located. In the future, we may support Nutanix resources (VMs, etc.) spread over multiple Prism Elements (clusters) of the Prism Central.", + "": "NutanixPlatformSpec holds the desired state of the Nutanix infrastructure provider. This only includes fields that can be modified in the cluster.", + "prismCentral": "prismCentral holds the endpoint address and port to access the Nutanix Prism Central. When a cluster-wide proxy is installed, by default, this endpoint will be accessed via the proxy. Should you wish for communication with this endpoint not to be proxied, please add the endpoint to the proxy spec.noProxy list.", + "prismElements": "prismElements holds one or more endpoint address and port data to access the Nutanix Prism Elements (clusters) of the Nutanix Prism Central. Currently we only support one Prism Element (cluster) for an OpenShift cluster, where all the Nutanix resources (VMs, subnets, volumes, etc.) used in the OpenShift cluster are located. In the future, we may support Nutanix resources (VMs, etc.) spread over multiple Prism Elements (clusters) of the Prism Central.", + "failureDomains": "failureDomains configures failure domains information for the Nutanix platform. When set, the failure domains defined here may be used to spread Machines across prism element clusters to improve fault tolerance of the cluster.", } func (NutanixPlatformSpec) SwaggerDoc() map[string]string { @@ -1418,6 +1551,17 @@ func (NutanixPrismEndpoint) SwaggerDoc() map[string]string { return map_NutanixPrismEndpoint } +var map_NutanixResourceIdentifier = map[string]string{ + "": "NutanixResourceIdentifier holds the identity of a Nutanix PC resource (cluster, image, subnet, etc.)", + "type": "type is the identifier type to use for this resource.", + "uuid": "uuid is the UUID of the resource in the PC. It cannot be empty if the type is UUID.", + "name": "name is the resource name in the PC. It cannot be empty if the type is Name.", +} + +func (NutanixResourceIdentifier) SwaggerDoc() map[string]string { + return map_NutanixResourceIdentifier +} + var map_OpenStackPlatformLoadBalancer = map[string]string{ "": "OpenStackPlatformLoadBalancer defines the load balancer used by the cluster on OpenStack platform.", "type": "type defines the type of load balancer used by the cluster on OpenStack platform which can be a user-managed or openshift-managed load balancer that is to be used for the OpenShift API and Ingress endpoints. When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing defined in the machine config operator will be deployed. When set to UserManaged these static pods will not be deployed and it is expected that the load balancer is configured out of band by the deployer. When omitted, this means no opinion and the platform is left to choose a reasonable default. 
The default value is OpenShiftManagedDefault.", @@ -1636,6 +1780,7 @@ var map_VSpherePlatformTopology = map[string]string{ "datastore": "datastore is the absolute path of the datastore in which the virtual machine is located. The absolute path is of the form //datastore/ The maximum length of the path is 2048 characters.", "resourcePool": "resourcePool is the absolute path of the resource pool where virtual machines will be created. The absolute path is of the form //host//Resources/. The maximum length of the path is 2048 characters.", "folder": "folder is the absolute path of the folder where virtual machines are located. The absolute path is of the form //vm/. The maximum length of the path is 2048 characters.", + "template": "template is the full inventory path of the virtual machine or template that will be cloned when creating new machines in this failure domain. The maximum length of the path is 2048 characters.\n\nWhen omitted, the template will be calculated by the control plane machineset operator based on the region and zone defined in VSpherePlatformFailureDomainSpec. For example, for zone=zonea, region=region1, and infrastructure name=test, the template path would be calculated as //vm/test-rhcos-region1-zonea.", } func (VSpherePlatformTopology) SwaggerDoc() map[string]string { @@ -1848,6 +1993,7 @@ var map_NetworkStatus = map[string]string{ "networkType": "NetworkType is the plugin that is deployed (e.g. OpenShiftSDN).", "clusterNetworkMTU": "ClusterNetworkMTU is the MTU for inter-pod networking.", "migration": "Migration contains the cluster network migration configuration.", + "conditions": "conditions represents the observations of a network.config current state. Known .status.conditions.type are: \"NetworkTypeMigrationInProgress\", \"NetworkTypeMigrationMTUReady\", \"NetworkTypeMigrationTargetCNIAvailable\", \"NetworkTypeMigrationTargetCNIInUse\" and \"NetworkTypeMigrationOriginalCNIPurged\"", } func (NetworkStatus) SwaggerDoc() map[string]string { @@ -2322,7 +2468,7 @@ func (OldTLSProfile) SwaggerDoc() map[string]string { var map_TLSProfileSpec = map[string]string{ "": "TLSProfileSpec is the desired behavior of a TLSSecurityProfile.", "ciphers": "ciphers is used to specify the cipher algorithms that are negotiated during the TLS handshake. Operators may remove entries their operands do not support. For example, to use DES-CBC3-SHA (yaml):\n\n ciphers:\n - DES-CBC3-SHA", - "minTLSVersion": "minTLSVersion is used to specify the minimal version of the TLS protocol that is negotiated during the TLS handshake. For example, to use TLS versions 1.1, 1.2 and 1.3 (yaml):\n\n minTLSVersion: TLSv1.1\n\nNOTE: currently the highest minTLSVersion allowed is VersionTLS12", + "minTLSVersion": "minTLSVersion is used to specify the minimal version of the TLS protocol that is negotiated during the TLS handshake. For example, to use TLS versions 1.1, 1.2 and 1.3 (yaml):\n\n minTLSVersion: VersionTLS11\n\nNOTE: currently the highest minTLSVersion allowed is VersionTLS12", } func (TLSProfileSpec) SwaggerDoc() map[string]string { @@ -2332,10 +2478,10 @@ func (TLSProfileSpec) SwaggerDoc() map[string]string { var map_TLSSecurityProfile = map[string]string{ "": "TLSSecurityProfile defines the schema for a TLS security profile. This object is used by operators to apply TLS security settings to operands.", "type": "type is one of Old, Intermediate, Modern or Custom. Custom provides the ability to specify individual TLS security profile parameters. 
Old, Intermediate and Modern are TLS security profiles based on:\n\nhttps://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations\n\nThe profiles are intent based, so they may change over time as new ciphers are developed and existing ciphers are found to be insecure. Depending on precisely which ciphers are available to a process, the list may be reduced.\n\nNote that the Modern profile is currently not supported because it is not yet well adopted by common software libraries.", - "old": "old is a TLS security profile based on:\n\nhttps://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility\n\nand looks like this (yaml):\n\n ciphers:\n - TLS_AES_128_GCM_SHA256\n - TLS_AES_256_GCM_SHA384\n - TLS_CHACHA20_POLY1305_SHA256\n - ECDHE-ECDSA-AES128-GCM-SHA256\n - ECDHE-RSA-AES128-GCM-SHA256\n - ECDHE-ECDSA-AES256-GCM-SHA384\n - ECDHE-RSA-AES256-GCM-SHA384\n - ECDHE-ECDSA-CHACHA20-POLY1305\n - ECDHE-RSA-CHACHA20-POLY1305\n - DHE-RSA-AES128-GCM-SHA256\n - DHE-RSA-AES256-GCM-SHA384\n - DHE-RSA-CHACHA20-POLY1305\n - ECDHE-ECDSA-AES128-SHA256\n - ECDHE-RSA-AES128-SHA256\n - ECDHE-ECDSA-AES128-SHA\n - ECDHE-RSA-AES128-SHA\n - ECDHE-ECDSA-AES256-SHA384\n - ECDHE-RSA-AES256-SHA384\n - ECDHE-ECDSA-AES256-SHA\n - ECDHE-RSA-AES256-SHA\n - DHE-RSA-AES128-SHA256\n - DHE-RSA-AES256-SHA256\n - AES128-GCM-SHA256\n - AES256-GCM-SHA384\n - AES128-SHA256\n - AES256-SHA256\n - AES128-SHA\n - AES256-SHA\n - DES-CBC3-SHA\n minTLSVersion: TLSv1.0", - "intermediate": "intermediate is a TLS security profile based on:\n\nhttps://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29\n\nand looks like this (yaml):\n\n ciphers:\n - TLS_AES_128_GCM_SHA256\n - TLS_AES_256_GCM_SHA384\n - TLS_CHACHA20_POLY1305_SHA256\n - ECDHE-ECDSA-AES128-GCM-SHA256\n - ECDHE-RSA-AES128-GCM-SHA256\n - ECDHE-ECDSA-AES256-GCM-SHA384\n - ECDHE-RSA-AES256-GCM-SHA384\n - ECDHE-ECDSA-CHACHA20-POLY1305\n - ECDHE-RSA-CHACHA20-POLY1305\n - DHE-RSA-AES128-GCM-SHA256\n - DHE-RSA-AES256-GCM-SHA384\n minTLSVersion: TLSv1.2", - "modern": "modern is a TLS security profile based on:\n\nhttps://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility\n\nand looks like this (yaml):\n\n ciphers:\n - TLS_AES_128_GCM_SHA256\n - TLS_AES_256_GCM_SHA384\n - TLS_CHACHA20_POLY1305_SHA256\n minTLSVersion: TLSv1.3\n\nNOTE: Currently unsupported.", - "custom": "custom is a user-defined TLS security profile. Be extremely careful using a custom profile as invalid configurations can be catastrophic. 
An example custom profile looks like this:\n\n ciphers:\n - ECDHE-ECDSA-CHACHA20-POLY1305\n - ECDHE-RSA-CHACHA20-POLY1305\n - ECDHE-RSA-AES128-GCM-SHA256\n - ECDHE-ECDSA-AES128-GCM-SHA256\n minTLSVersion: TLSv1.1", + "old": "old is a TLS security profile based on:\n\nhttps://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility\n\nand looks like this (yaml):\n\n ciphers:\n - TLS_AES_128_GCM_SHA256\n - TLS_AES_256_GCM_SHA384\n - TLS_CHACHA20_POLY1305_SHA256\n - ECDHE-ECDSA-AES128-GCM-SHA256\n - ECDHE-RSA-AES128-GCM-SHA256\n - ECDHE-ECDSA-AES256-GCM-SHA384\n - ECDHE-RSA-AES256-GCM-SHA384\n - ECDHE-ECDSA-CHACHA20-POLY1305\n - ECDHE-RSA-CHACHA20-POLY1305\n - DHE-RSA-AES128-GCM-SHA256\n - DHE-RSA-AES256-GCM-SHA384\n - DHE-RSA-CHACHA20-POLY1305\n - ECDHE-ECDSA-AES128-SHA256\n - ECDHE-RSA-AES128-SHA256\n - ECDHE-ECDSA-AES128-SHA\n - ECDHE-RSA-AES128-SHA\n - ECDHE-ECDSA-AES256-SHA384\n - ECDHE-RSA-AES256-SHA384\n - ECDHE-ECDSA-AES256-SHA\n - ECDHE-RSA-AES256-SHA\n - DHE-RSA-AES128-SHA256\n - DHE-RSA-AES256-SHA256\n - AES128-GCM-SHA256\n - AES256-GCM-SHA384\n - AES128-SHA256\n - AES256-SHA256\n - AES128-SHA\n - AES256-SHA\n - DES-CBC3-SHA\n minTLSVersion: VersionTLS10", + "intermediate": "intermediate is a TLS security profile based on:\n\nhttps://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29\n\nand looks like this (yaml):\n\n ciphers:\n - TLS_AES_128_GCM_SHA256\n - TLS_AES_256_GCM_SHA384\n - TLS_CHACHA20_POLY1305_SHA256\n - ECDHE-ECDSA-AES128-GCM-SHA256\n - ECDHE-RSA-AES128-GCM-SHA256\n - ECDHE-ECDSA-AES256-GCM-SHA384\n - ECDHE-RSA-AES256-GCM-SHA384\n - ECDHE-ECDSA-CHACHA20-POLY1305\n - ECDHE-RSA-CHACHA20-POLY1305\n - DHE-RSA-AES128-GCM-SHA256\n - DHE-RSA-AES256-GCM-SHA384\n minTLSVersion: VersionTLS12", + "modern": "modern is a TLS security profile based on:\n\nhttps://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility\n\nand looks like this (yaml):\n\n ciphers:\n - TLS_AES_128_GCM_SHA256\n - TLS_AES_256_GCM_SHA384\n - TLS_CHACHA20_POLY1305_SHA256\n minTLSVersion: VersionTLS13\n\nNOTE: Currently unsupported.", + "custom": "custom is a user-defined TLS security profile. Be extremely careful using a custom profile as invalid configurations can be catastrophic. 
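(Illustrative aside, not part of the vendored diff: the custom profile given in the doc-string example that follows can also be expressed against the config/v1 Go types. TLSSecurityProfile appears in this hunk; CustomTLSProfile, TLSProfileCustomType and VersionTLS11 are the conventional config/v1 identifiers and are assumed here rather than shown in the diff.)

package main

import (
	configv1 "github.com/openshift/api/config/v1"
)

// customTLSProfile sketches a user-defined profile: an explicit cipher list
// plus a minimum TLS version, using the VersionTLS* spelling that the
// updated doc strings above use in place of "TLSv1.x".
func customTLSProfile() *configv1.TLSSecurityProfile {
	return &configv1.TLSSecurityProfile{
		Type: configv1.TLSProfileCustomType,
		Custom: &configv1.CustomTLSProfile{
			TLSProfileSpec: configv1.TLSProfileSpec{
				Ciphers: []string{
					"ECDHE-ECDSA-CHACHA20-POLY1305",
					"ECDHE-RSA-CHACHA20-POLY1305",
					"ECDHE-RSA-AES128-GCM-SHA256",
					"ECDHE-ECDSA-AES128-GCM-SHA256",
				},
				MinTLSVersion: configv1.VersionTLS11,
			},
		},
	}
}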
An example custom profile looks like this:\n\n ciphers:\n - ECDHE-ECDSA-CHACHA20-POLY1305\n - ECDHE-RSA-CHACHA20-POLY1305\n - ECDHE-RSA-AES128-GCM-SHA256\n - ECDHE-ECDSA-AES128-GCM-SHA256\n minTLSVersion: VersionTLS11", } func (TLSSecurityProfile) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/console/v1/90_consoleplugin.crd.yaml b/vendor/github.com/openshift/api/console/v1/90_consoleplugin.crd.yaml index 5734ebe0b6..641d0d3e16 100644 --- a/vendor/github.com/openshift/api/console/v1/90_consoleplugin.crd.yaml +++ b/vendor/github.com/openshift/api/console/v1/90_consoleplugin.crd.yaml @@ -179,7 +179,7 @@ spec: enum: - Service served: true - storage: false + storage: true - name: v1alpha1 schema: openAPIV3Schema: @@ -291,4 +291,4 @@ spec: maximum: 65535 minimum: 1 served: true - storage: true + storage: false diff --git a/vendor/github.com/openshift/api/console/v1alpha1/90_consoleplugin.crd.yaml b/vendor/github.com/openshift/api/console/v1alpha1/90_consoleplugin.crd.yaml index 52034c96d8..d6c5fa33da 100644 --- a/vendor/github.com/openshift/api/console/v1alpha1/90_consoleplugin.crd.yaml +++ b/vendor/github.com/openshift/api/console/v1alpha1/90_consoleplugin.crd.yaml @@ -179,7 +179,7 @@ spec: enum: - Service served: true - storage: false + storage: true - name: v1alpha1 schema: openAPIV3Schema: @@ -291,4 +291,4 @@ spec: maximum: 65535 minimum: 1 served: true - storage: true + storage: false diff --git a/vendor/github.com/openshift/api/imageregistry/v1/00_imageregistry.crd.yaml b/vendor/github.com/openshift/api/imageregistry/v1/00_imageregistry.crd.yaml index e2406e37cb..67d1c93d30 100644 --- a/vendor/github.com/openshift/api/imageregistry/v1/00_imageregistry.crd.yaml +++ b/vendor/github.com/openshift/api/imageregistry/v1/00_imageregistry.crd.yaml @@ -680,6 +680,50 @@ spec: minLength: 3 pattern: ^[0-9a-z]+(-[0-9a-z]+)*$ type: string + networkAccess: + default: + type: External + description: 'networkAccess defines the network access properties for the storage account. Defaults to type: External.' + properties: + internal: + description: 'internal defines the vnet and subnet names to configure a private endpoint and connect it to the storage account in order to make it private. when type: Internal and internal is unset, the image registry operator will discover vnet and subnet names, and generate a private endpoint name.' + properties: + networkResourceGroupName: + description: networkResourceGroupName is the resource group name where the cluster's vnet and subnet are. When omitted, the registry operator will use the cluster resource group (from in the infrastructure status). If you set a networkResourceGroupName on your install-config.yaml, that value will be used automatically (for clusters configured with publish:Internal). Note that both vnet and subnet must be in the same resource group. It must be between 1 and 90 characters in length and must consist only of alphanumeric characters, hyphens (-), periods (.) and underscores (_), and not end with a period. + maxLength: 90 + minLength: 1 + pattern: ^[0-9A-Za-z_.-](?:[0-9A-Za-z_.-]*[0-9A-Za-z_-])?$ + type: string + privateEndpointName: + description: privateEndpointName is the name of the private endpoint for the registry. When provided, the registry will use it as the name of the private endpoint it will create for the storage account. When omitted, the registry will generate one. It must be between 2 and 64 characters in length and must consist only of alphanumeric characters, hyphens (-), periods (.) 
and underscores (_). It must start with an alphanumeric character and end with an alphanumeric character or an underscore. + maxLength: 64 + minLength: 2 + pattern: ^[0-9A-Za-z][0-9A-Za-z_.-]*[0-9A-Za-z_]$ + type: string + subnetName: + description: subnetName is the name of the subnet the registry operates in. When omitted, the registry operator will discover and set this by using the `kubernetes.io_cluster.` tag in the vnet resource, then using one of listed subnets. Advanced cluster network configurations that use network security groups to protect subnets should ensure the provided subnetName has access to Azure Storage service. It must be between 1 and 80 characters in length and must consist only of alphanumeric characters, hyphens (-), periods (.) and underscores (_). + maxLength: 80 + minLength: 1 + pattern: ^[0-9A-Za-z](?:[0-9A-Za-z_.-]*[0-9A-Za-z_])?$ + type: string + vnetName: + description: vnetName is the name of the vnet the registry operates in. When omitted, the registry operator will discover and set this by using the `kubernetes.io_cluster.` tag in the vnet resource. This tag is set automatically by the installer. Commonly, this will be the same vnet as the cluster. Advanced cluster network configurations should ensure the provided vnetName is the vnet of the nodes where the image registry pods are running from. It must be between 2 and 64 characters in length and must consist only of alphanumeric characters, hyphens (-), periods (.) and underscores (_). It must start with an alphanumeric character and end with an alphanumeric character or an underscore. + maxLength: 64 + minLength: 2 + pattern: ^[0-9A-Za-z][0-9A-Za-z_.-]*[0-9A-Za-z_]$ + type: string + type: object + type: + default: External + description: 'type is the network access level to be used for the storage account. type: Internal means the storage account will be private, type: External means the storage account will be publicly accessible. Internal storage accounts are only exposed within the cluster''s vnet. External storage accounts are publicly exposed on the internet. When type: Internal is used, a vnetName, subNetName and privateEndpointName may optionally be specified. If unspecificed, the image registry operator will discover vnet and subnet names, and generate a privateEndpointName. Defaults to "External".' + enum: + - Internal + - External + type: string + type: object + x-kubernetes-validations: + - message: internal is forbidden when type is not Internal + rule: 'has(self.type) && self.type == ''Internal'' ? true : !has(self.internal)' type: object emptyDir: description: 'emptyDir represents ephemeral storage on the pod''s host node. WARNING: this storage cannot be used with more than 1 replica and is not suitable for production use. When the pod is removed from a node for any reason, the data in the emptyDir is deleted forever.' @@ -1055,6 +1099,50 @@ spec: minLength: 3 pattern: ^[0-9a-z]+(-[0-9a-z]+)*$ type: string + networkAccess: + default: + type: External + description: 'networkAccess defines the network access properties for the storage account. Defaults to type: External.' + properties: + internal: + description: 'internal defines the vnet and subnet names to configure a private endpoint and connect it to the storage account in order to make it private. when type: Internal and internal is unset, the image registry operator will discover vnet and subnet names, and generate a private endpoint name.' 
+ properties: + networkResourceGroupName: + description: networkResourceGroupName is the resource group name where the cluster's vnet and subnet are. When omitted, the registry operator will use the cluster resource group (from in the infrastructure status). If you set a networkResourceGroupName on your install-config.yaml, that value will be used automatically (for clusters configured with publish:Internal). Note that both vnet and subnet must be in the same resource group. It must be between 1 and 90 characters in length and must consist only of alphanumeric characters, hyphens (-), periods (.) and underscores (_), and not end with a period. + maxLength: 90 + minLength: 1 + pattern: ^[0-9A-Za-z_.-](?:[0-9A-Za-z_.-]*[0-9A-Za-z_-])?$ + type: string + privateEndpointName: + description: privateEndpointName is the name of the private endpoint for the registry. When provided, the registry will use it as the name of the private endpoint it will create for the storage account. When omitted, the registry will generate one. It must be between 2 and 64 characters in length and must consist only of alphanumeric characters, hyphens (-), periods (.) and underscores (_). It must start with an alphanumeric character and end with an alphanumeric character or an underscore. + maxLength: 64 + minLength: 2 + pattern: ^[0-9A-Za-z][0-9A-Za-z_.-]*[0-9A-Za-z_]$ + type: string + subnetName: + description: subnetName is the name of the subnet the registry operates in. When omitted, the registry operator will discover and set this by using the `kubernetes.io_cluster.` tag in the vnet resource, then using one of listed subnets. Advanced cluster network configurations that use network security groups to protect subnets should ensure the provided subnetName has access to Azure Storage service. It must be between 1 and 80 characters in length and must consist only of alphanumeric characters, hyphens (-), periods (.) and underscores (_). + maxLength: 80 + minLength: 1 + pattern: ^[0-9A-Za-z](?:[0-9A-Za-z_.-]*[0-9A-Za-z_])?$ + type: string + vnetName: + description: vnetName is the name of the vnet the registry operates in. When omitted, the registry operator will discover and set this by using the `kubernetes.io_cluster.` tag in the vnet resource. This tag is set automatically by the installer. Commonly, this will be the same vnet as the cluster. Advanced cluster network configurations should ensure the provided vnetName is the vnet of the nodes where the image registry pods are running from. It must be between 2 and 64 characters in length and must consist only of alphanumeric characters, hyphens (-), periods (.) and underscores (_). It must start with an alphanumeric character and end with an alphanumeric character or an underscore. + maxLength: 64 + minLength: 2 + pattern: ^[0-9A-Za-z][0-9A-Za-z_.-]*[0-9A-Za-z_]$ + type: string + type: object + type: + default: External + description: 'type is the network access level to be used for the storage account. type: Internal means the storage account will be private, type: External means the storage account will be publicly accessible. Internal storage accounts are only exposed within the cluster''s vnet. External storage accounts are publicly exposed on the internet. When type: Internal is used, a vnetName, subNetName and privateEndpointName may optionally be specified. If unspecificed, the image registry operator will discover vnet and subnet names, and generate a privateEndpointName. Defaults to "External".' 
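(Illustrative aside, not part of the vendored diff: the networkAccess stanza above maps onto the AzureNetworkAccess and AzureNetworkAccessInternal Go types added to imageregistry/v1/types.go further down in this change. A minimal sketch, with placeholder resource names:)

package main

import (
	"fmt"

	imageregistryv1 "github.com/openshift/api/imageregistry/v1"
)

func main() {
	// Internal access: the registry operator connects the storage account to
	// the given vnet/subnet through a private endpoint. Every field under
	// Internal is optional; omitted values are discovered or generated by the
	// operator, as described in the field documentation above.
	storage := imageregistryv1.ImageRegistryConfigStorageAzure{
		NetworkAccess: &imageregistryv1.AzureNetworkAccess{
			Type: imageregistryv1.AzureNetworkAccessTypeInternal,
			Internal: &imageregistryv1.AzureNetworkAccessInternal{
				NetworkResourceGroupName: "example-network-rg",  // placeholder
				VNetName:                 "example-vnet",        // placeholder
				SubnetName:               "example-subnet",      // placeholder
				PrivateEndpointName:      "example-registry-pe", // placeholder
			},
		},
	}
	fmt.Println(storage.NetworkAccess.Type) // prints "Internal"
}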
+ enum: + - Internal + - External + type: string + type: object + x-kubernetes-validations: + - message: internal is forbidden when type is not Internal + rule: 'has(self.type) && self.type == ''Internal'' ? true : !has(self.internal)' type: object emptyDir: description: 'emptyDir represents ephemeral storage on the pod''s host node. WARNING: this storage cannot be used with more than 1 replica and is not suitable for production use. When the pod is removed from a node for any reason, the data in the emptyDir is deleted forever.' diff --git a/vendor/github.com/openshift/api/imageregistry/v1/stable.config.testsuite.yaml b/vendor/github.com/openshift/api/imageregistry/v1/stable.config.testsuite.yaml index 85a6b45d53..9d64cece98 100644 --- a/vendor/github.com/openshift/api/imageregistry/v1/stable.config.testsuite.yaml +++ b/vendor/github.com/openshift/api/imageregistry/v1/stable.config.testsuite.yaml @@ -16,3 +16,94 @@ tests: logLevel: Normal operatorLogLevel: Normal replicas: 1 + - name: Should be able to configure internal storage network access for Azure + initial: | + apiVersion: imageregistry.operator.openshift.io/v1 + kind: Config + spec: + replicas: 1 + storage: + azure: + networkAccess: + type: Internal + internal: + networkResourceGroupName: .my.resource_group-name + vnetName: foo-bar + subnetName: bar_foo + privateEndpointName: my-private-endpoint + expected: | + apiVersion: imageregistry.operator.openshift.io/v1 + kind: Config + spec: + logLevel: Normal + operatorLogLevel: Normal + replicas: 1 + storage: + azure: + networkAccess: + type: Internal + internal: + networkResourceGroupName: .my.resource_group-name + vnetName: foo-bar + subnetName: bar_foo + privateEndpointName: my-private-endpoint + - name: Should be able to configure internal storage network access for Azure without details + initial: | + apiVersion: imageregistry.operator.openshift.io/v1 + kind: Config + spec: + replicas: 1 + storage: + azure: + networkAccess: + type: Internal + expected: | + apiVersion: imageregistry.operator.openshift.io/v1 + kind: Config + spec: + logLevel: Normal + operatorLogLevel: Normal + replicas: 1 + storage: + azure: + networkAccess: + type: Internal + - name: Should reject configuration with invalid vnet name in internal storage network access config + initial: | + apiVersion: imageregistry.operator.openshift.io/v1 + kind: Config + spec: + replicas: 1 + storage: + azure: + networkAccess: + type: Internal + internal: + vnetName: .invalid + expectedError: "spec.storage.azure.networkAccess.internal.vnetName in body should match '^[0-9A-Za-z][0-9A-Za-z_.-]*[0-9A-Za-z_]$'" + - name: Should reject configuration with invalid subnet name in internal storage network access config + initial: | + apiVersion: imageregistry.operator.openshift.io/v1 + kind: Config + spec: + replicas: 1 + storage: + azure: + networkAccess: + type: Internal + internal: + subnetName: .invalid + expectedError: "spec.storage.azure.networkAccess.internal.subnetName in body should match '^[0-9A-Za-z](?:[0-9A-Za-z_.-]*[0-9A-Za-z_])?$'" + - name: Should reject configuration with invalid network resource group name in internal storage network access config + initial: | + apiVersion: imageregistry.operator.openshift.io/v1 + kind: Config + spec: + replicas: 1 + storage: + azure: + networkAccess: + type: Internal + internal: + networkResourceGroupName: invalid. 
+ expectedError: "spec.storage.azure.networkAccess.internal.networkResourceGroupName in body should match '^[0-9A-Za-z_.-](?:[0-9A-Za-z_.-]*[0-9A-Za-z_-])?$'" diff --git a/vendor/github.com/openshift/api/imageregistry/v1/types.go b/vendor/github.com/openshift/api/imageregistry/v1/types.go index e9c0ca3098..840ac74c92 100644 --- a/vendor/github.com/openshift/api/imageregistry/v1/types.go +++ b/vendor/github.com/openshift/api/imageregistry/v1/types.go @@ -159,8 +159,7 @@ type ImageRegistryConfigStorageS3CloudFront struct { // ImageRegistryConfigStorageEmptyDir is an place holder to be used when // when registry is leveraging ephemeral storage. -type ImageRegistryConfigStorageEmptyDir struct { -} +type ImageRegistryConfigStorageEmptyDir struct{} // S3TrustedCASource references a config map with a CA certificate bundle in // the "openshift-config" namespace. The key for the bundle in the @@ -303,8 +302,107 @@ type ImageRegistryConfigStorageAzure struct { // object. // +optional CloudName string `json:"cloudName,omitempty"` + // networkAccess defines the network access properties for the storage account. + // Defaults to type: External. + // +kubebuilder:default={"type": "External"} + // +optional + NetworkAccess *AzureNetworkAccess `json:"networkAccess,omitempty"` +} + +// AzureNetworkAccess defines the network access properties for the storage account. +// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'Internal' ? true : !has(self.internal)",message="internal is forbidden when type is not Internal" +// +union +type AzureNetworkAccess struct { + // type is the network access level to be used for the storage account. + // type: Internal means the storage account will be private, type: External + // means the storage account will be publicly accessible. + // Internal storage accounts are only exposed within the cluster's vnet. + // External storage accounts are publicly exposed on the internet. + // When type: Internal is used, a vnetName, subNetName and privateEndpointName + // may optionally be specified. If unspecificed, the image registry operator + // will discover vnet and subnet names, and generate a privateEndpointName. + // Defaults to "External". + // +kubebuilder:default:="External" + // +unionDiscriminator + // +optional + Type AzureNetworkAccessType `json:"type,omitempty"` + // internal defines the vnet and subnet names to configure a private + // endpoint and connect it to the storage account in order to make it + // private. + // when type: Internal and internal is unset, the image registry operator + // will discover vnet and subnet names, and generate a private endpoint + // name. + // +optional + Internal *AzureNetworkAccessInternal `json:"internal,omitempty"` +} + +type AzureNetworkAccessInternal struct { + // networkResourceGroupName is the resource group name where the cluster's vnet + // and subnet are. When omitted, the registry operator will use the cluster + // resource group (from in the infrastructure status). + // If you set a networkResourceGroupName on your install-config.yaml, that + // value will be used automatically (for clusters configured with publish:Internal). + // Note that both vnet and subnet must be in the same resource group. + // It must be between 1 and 90 characters in length and must consist only of + // alphanumeric characters, hyphens (-), periods (.) and underscores (_), and + // not end with a period. 
+ // +kubebuilder:validation:MaxLength=90 + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:Pattern=`^[0-9A-Za-z_.-](?:[0-9A-Za-z_.-]*[0-9A-Za-z_-])?$` + // +optional + NetworkResourceGroupName string `json:"networkResourceGroupName,omitempty"` + // vnetName is the name of the vnet the registry operates in. When omitted, + // the registry operator will discover and set this by using the `kubernetes.io_cluster.` + // tag in the vnet resource. This tag is set automatically by the installer. + // Commonly, this will be the same vnet as the cluster. + // Advanced cluster network configurations should ensure the provided vnetName + // is the vnet of the nodes where the image registry pods are running from. + // It must be between 2 and 64 characters in length and must consist only of + // alphanumeric characters, hyphens (-), periods (.) and underscores (_). + // It must start with an alphanumeric character and end with an alphanumeric character or an underscore. + // +kubebuilder:validation:MaxLength=64 + // +kubebuilder:validation:MinLength=2 + // +kubebuilder:validation:Pattern=`^[0-9A-Za-z][0-9A-Za-z_.-]*[0-9A-Za-z_]$` + // +optional + VNetName string `json:"vnetName,omitempty"` + // subnetName is the name of the subnet the registry operates in. When omitted, + // the registry operator will discover and set this by using the `kubernetes.io_cluster.` + // tag in the vnet resource, then using one of listed subnets. + // Advanced cluster network configurations that use network security groups + // to protect subnets should ensure the provided subnetName has access to + // Azure Storage service. + // It must be between 1 and 80 characters in length and must consist only of + // alphanumeric characters, hyphens (-), periods (.) and underscores (_). + // +kubebuilder:validation:MaxLength=80 + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:Pattern=`^[0-9A-Za-z](?:[0-9A-Za-z_.-]*[0-9A-Za-z_])?$` + // +optional + SubnetName string `json:"subnetName,omitempty"` + // privateEndpointName is the name of the private endpoint for the registry. + // When provided, the registry will use it as the name of the private endpoint + // it will create for the storage account. When omitted, the registry will + // generate one. + // It must be between 2 and 64 characters in length and must consist only of + // alphanumeric characters, hyphens (-), periods (.) and underscores (_). + // It must start with an alphanumeric character and end with an alphanumeric character or an underscore. + // +kubebuilder:validation:MaxLength=64 + // +kubebuilder:validation:MinLength=2 + // +kubebuilder:validation:Pattern=`^[0-9A-Za-z][0-9A-Za-z_.-]*[0-9A-Za-z_]$` + // +optional + PrivateEndpointName string `json:"privateEndpointName,omitempty"` } +// AzureNetworkAccessType is the network access level to be used for the storage account. +// +kubebuilder:validation:Enum:="Internal";"External" +type AzureNetworkAccessType string + +const ( + // AzureNetworkAccessTypeInternal means the storage account will be private + AzureNetworkAccessTypeInternal AzureNetworkAccessType = "Internal" + // AzureNetworkAccessTypeExternal means the storage account will be publicly accessible + AzureNetworkAccessTypeExternal AzureNetworkAccessType = "External" +) + // ImageRegistryConfigStorageIBMCOS holds the information to configure // the registry to use IBM Cloud Object Storage for backend storage. 
type ImageRegistryConfigStorageIBMCOS struct { diff --git a/vendor/github.com/openshift/api/imageregistry/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/imageregistry/v1/zz_generated.deepcopy.go index 7519720a12..63f25fc19e 100644 --- a/vendor/github.com/openshift/api/imageregistry/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/imageregistry/v1/zz_generated.deepcopy.go @@ -14,6 +14,43 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureNetworkAccess) DeepCopyInto(out *AzureNetworkAccess) { + *out = *in + if in.Internal != nil { + in, out := &in.Internal, &out.Internal + *out = new(AzureNetworkAccessInternal) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureNetworkAccess. +func (in *AzureNetworkAccess) DeepCopy() *AzureNetworkAccess { + if in == nil { + return nil + } + out := new(AzureNetworkAccess) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureNetworkAccessInternal) DeepCopyInto(out *AzureNetworkAccessInternal) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureNetworkAccessInternal. +func (in *AzureNetworkAccessInternal) DeepCopy() *AzureNetworkAccessInternal { + if in == nil { + return nil + } + out := new(AzureNetworkAccessInternal) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Config) DeepCopyInto(out *Config) { *out = *in @@ -348,7 +385,7 @@ func (in *ImageRegistryConfigStorage) DeepCopyInto(out *ImageRegistryConfigStora if in.Azure != nil { in, out := &in.Azure, &out.Azure *out = new(ImageRegistryConfigStorageAzure) - **out = **in + (*in).DeepCopyInto(*out) } if in.IBMCOS != nil { in, out := &in.IBMCOS, &out.IBMCOS @@ -397,6 +434,11 @@ func (in *ImageRegistryConfigStorageAlibabaOSS) DeepCopy() *ImageRegistryConfigS // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ImageRegistryConfigStorageAzure) DeepCopyInto(out *ImageRegistryConfigStorageAzure) { *out = *in + if in.NetworkAccess != nil { + in, out := &in.NetworkAccess, &out.NetworkAccess + *out = new(AzureNetworkAccess) + (*in).DeepCopyInto(*out) + } return } diff --git a/vendor/github.com/openshift/api/imageregistry/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/imageregistry/v1/zz_generated.swagger_doc_generated.go index 391381ea0e..24d6d780dd 100644 --- a/vendor/github.com/openshift/api/imageregistry/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/imageregistry/v1/zz_generated.swagger_doc_generated.go @@ -11,6 +11,27 @@ package v1 // Those methods can be generated by using hack/update-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE +var map_AzureNetworkAccess = map[string]string{ + "": "AzureNetworkAccess defines the network access properties for the storage account.", + "type": "type is the network access level to be used for the storage account. type: Internal means the storage account will be private, type: External means the storage account will be publicly accessible. 
Internal storage accounts are only exposed within the cluster's vnet. External storage accounts are publicly exposed on the internet. When type: Internal is used, a vnetName, subNetName and privateEndpointName may optionally be specified. If unspecificed, the image registry operator will discover vnet and subnet names, and generate a privateEndpointName. Defaults to \"External\".", + "internal": "internal defines the vnet and subnet names to configure a private endpoint and connect it to the storage account in order to make it private. when type: Internal and internal is unset, the image registry operator will discover vnet and subnet names, and generate a private endpoint name.", +} + +func (AzureNetworkAccess) SwaggerDoc() map[string]string { + return map_AzureNetworkAccess +} + +var map_AzureNetworkAccessInternal = map[string]string{ + "networkResourceGroupName": "networkResourceGroupName is the resource group name where the cluster's vnet and subnet are. When omitted, the registry operator will use the cluster resource group (from in the infrastructure status). If you set a networkResourceGroupName on your install-config.yaml, that value will be used automatically (for clusters configured with publish:Internal). Note that both vnet and subnet must be in the same resource group. It must be between 1 and 90 characters in length and must consist only of alphanumeric characters, hyphens (-), periods (.) and underscores (_), and not end with a period.", + "vnetName": "vnetName is the name of the vnet the registry operates in. When omitted, the registry operator will discover and set this by using the `kubernetes.io_cluster.` tag in the vnet resource. This tag is set automatically by the installer. Commonly, this will be the same vnet as the cluster. Advanced cluster network configurations should ensure the provided vnetName is the vnet of the nodes where the image registry pods are running from. It must be between 2 and 64 characters in length and must consist only of alphanumeric characters, hyphens (-), periods (.) and underscores (_). It must start with an alphanumeric character and end with an alphanumeric character or an underscore.", + "subnetName": "subnetName is the name of the subnet the registry operates in. When omitted, the registry operator will discover and set this by using the `kubernetes.io_cluster.` tag in the vnet resource, then using one of listed subnets. Advanced cluster network configurations that use network security groups to protect subnets should ensure the provided subnetName has access to Azure Storage service. It must be between 1 and 80 characters in length and must consist only of alphanumeric characters, hyphens (-), periods (.) and underscores (_).", + "privateEndpointName": "privateEndpointName is the name of the private endpoint for the registry. When provided, the registry will use it as the name of the private endpoint it will create for the storage account. When omitted, the registry will generate one. It must be between 2 and 64 characters in length and must consist only of alphanumeric characters, hyphens (-), periods (.) and underscores (_). 
It must start with an alphanumeric character and end with an alphanumeric character or an underscore.", +} + +func (AzureNetworkAccessInternal) SwaggerDoc() map[string]string { + return map_AzureNetworkAccessInternal +} + var map_Config = map[string]string{ "": "Config is the configuration object for a registry instance managed by the registry operator\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", @@ -112,10 +133,11 @@ func (ImageRegistryConfigStorageAlibabaOSS) SwaggerDoc() map[string]string { } var map_ImageRegistryConfigStorageAzure = map[string]string{ - "": "ImageRegistryConfigStorageAzure holds the information to configure the registry to use Azure Blob Storage for backend storage.", - "accountName": "accountName defines the account to be used by the registry.", - "container": "container defines Azure's container to be used by registry.", - "cloudName": "cloudName is the name of the Azure cloud environment to be used by the registry. If empty, the operator will set it based on the infrastructure object.", + "": "ImageRegistryConfigStorageAzure holds the information to configure the registry to use Azure Blob Storage for backend storage.", + "accountName": "accountName defines the account to be used by the registry.", + "container": "container defines Azure's container to be used by registry.", + "cloudName": "cloudName is the name of the Azure cloud environment to be used by the registry. If empty, the operator will set it based on the infrastructure object.", + "networkAccess": "networkAccess defines the network access properties for the storage account. 
Defaults to type: External.", } func (ImageRegistryConfigStorageAzure) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/install.go b/vendor/github.com/openshift/api/install.go index d7668b3c03..cc91150009 100644 --- a/vendor/github.com/openshift/api/install.go +++ b/vendor/github.com/openshift/api/install.go @@ -26,7 +26,6 @@ import ( keventsv1 "k8s.io/api/events/v1" keventsv1beta1 "k8s.io/api/events/v1beta1" kextensionsv1beta1 "k8s.io/api/extensions/v1beta1" - kflowcontrolv1alpha1 "k8s.io/api/flowcontrol/v1alpha1" kflowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" kflowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2" kimagepolicyv1alpha1 "k8s.io/api/imagepolicy/v1alpha1" @@ -143,7 +142,6 @@ var ( keventsv1.AddToScheme, keventsv1beta1.AddToScheme, kextensionsv1beta1.AddToScheme, - kflowcontrolv1alpha1.AddToScheme, kflowcontrolv1beta1.AddToScheme, kflowcontrolv1beta2.AddToScheme, kimagepolicyv1alpha1.AddToScheme, diff --git a/vendor/github.com/openshift/api/machine/.codegen.yaml b/vendor/github.com/openshift/api/machine/.codegen.yaml index ffa2c8d9b2..e799336feb 100644 --- a/vendor/github.com/openshift/api/machine/.codegen.yaml +++ b/vendor/github.com/openshift/api/machine/.codegen.yaml @@ -1,2 +1,8 @@ +schemapatch: + requiredFeatureSets: + - "" + - "Default" + - "TechPreviewNoUpgrade" + - "CustomNoUpgrade" swaggerdocs: commentPolicy: Warn diff --git a/vendor/github.com/openshift/api/machine/v1/0000_10_controlplanemachineset-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/machine/v1/0000_10_controlplanemachineset-CustomNoUpgrade.crd.yaml new file mode 100644 index 0000000000..61f84a389d --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1/0000_10_controlplanemachineset-CustomNoUpgrade.crd.yaml @@ -0,0 +1,604 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1112 + capability.openshift.io/name: MachineAPI + exclude.release.openshift.io/internal-openshift-hosted: "true" + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: CustomNoUpgrade + creationTimestamp: null + name: controlplanemachinesets.machine.openshift.io +spec: + group: machine.openshift.io + names: + kind: ControlPlaneMachineSet + listKind: ControlPlaneMachineSetList + plural: controlplanemachinesets + singular: controlplanemachineset + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Desired Replicas + jsonPath: .spec.replicas + name: Desired + type: integer + - description: Current Replicas + jsonPath: .status.replicas + name: Current + type: integer + - description: Ready Replicas + jsonPath: .status.readyReplicas + name: Ready + type: integer + - description: Updated Replicas + jsonPath: .status.updatedReplicas + name: Updated + type: integer + - description: Observed number of unavailable replicas + jsonPath: .status.unavailableReplicas + name: Unavailable + type: integer + - description: ControlPlaneMachineSet state + jsonPath: .spec.state + name: State + type: string + - description: ControlPlaneMachineSet age + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: 'ControlPlaneMachineSet ensures that a specified number of control plane machine replicas are running at any given time. Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).' 
+ type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ControlPlaneMachineSet represents the configuration of the ControlPlaneMachineSet. + type: object + required: + - replicas + - selector + - template + properties: + replicas: + description: Replicas defines how many Control Plane Machines should be created by this ControlPlaneMachineSet. This field is immutable and cannot be changed after cluster installation. The ControlPlaneMachineSet only operates with 3 or 5 node control planes, 3 and 5 are the only valid values for this field. + type: integer + format: int32 + default: 3 + enum: + - 3 + - 5 + x-kubernetes-validations: + - rule: self == oldSelf + message: replicas is immutable + selector: + description: Label selector for Machines. Existing Machines selected by this selector will be the ones affected by this ControlPlaneMachineSet. It must match the template's labels. This field is considered immutable after creation of the resource. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + x-kubernetes-validations: + - rule: self == oldSelf + message: selector is immutable + state: + description: State defines whether the ControlPlaneMachineSet is Active or Inactive. When Inactive, the ControlPlaneMachineSet will not take any action on the state of the Machines within the cluster. When Active, the ControlPlaneMachineSet will reconcile the Machines and will update the Machines as necessary. Once Active, a ControlPlaneMachineSet cannot be made Inactive. To prevent further action please remove the ControlPlaneMachineSet. 
+ type: string + default: Inactive + enum: + - Active + - Inactive + x-kubernetes-validations: + - rule: oldSelf != 'Active' || self == oldSelf + message: state cannot be changed once Active + strategy: + description: Strategy defines how the ControlPlaneMachineSet will update Machines when it detects a change to the ProviderSpec. + type: object + default: + type: RollingUpdate + properties: + type: + description: Type defines the type of update strategy that should be used when updating Machines owned by the ControlPlaneMachineSet. Valid values are "RollingUpdate" and "OnDelete". The current default value is "RollingUpdate". + type: string + default: RollingUpdate + enum: + - RollingUpdate + - OnDelete + template: + description: Template describes the Control Plane Machines that will be created by this ControlPlaneMachineSet. + type: object + required: + - machineType + properties: + machineType: + description: MachineType determines the type of Machines that should be managed by the ControlPlaneMachineSet. Currently, the only valid value is machines_v1beta1_machine_openshift_io. + type: string + enum: + - machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + description: OpenShiftMachineV1Beta1Machine defines the template for creating Machines from the v1beta1.machine.openshift.io API group. + type: object + required: + - metadata + - spec + properties: + failureDomains: + description: FailureDomains is the list of failure domains (sometimes called availability zones) in which the ControlPlaneMachineSet should balance the Control Plane Machines. This will be merged into the ProviderSpec given in the template. This field is optional on platforms that do not require placement information. + type: object + required: + - platform + properties: + aws: + description: AWS configures failure domain information for the AWS platform. + type: array + items: + description: AWSFailureDomain configures failure domain information for the AWS platform. + type: object + minProperties: 1 + properties: + placement: + description: Placement configures the placement information for this instance. + type: object + required: + - availabilityZone + properties: + availabilityZone: + description: AvailabilityZone is the availability zone of the instance. + type: string + subnet: + description: Subnet is a reference to the subnet to use for this instance. + type: object + required: + - type + properties: + arn: + description: ARN of resource. + type: string + filters: + description: Filters is a set of filters used to identify a resource. + type: array + items: + description: AWSResourceFilter is a filter used to identify an AWS resource + type: object + required: + - name + properties: + name: + description: Name of the filter. Filter names are case-sensitive. + type: string + values: + description: Values includes one or more filter values. Filter values are case-sensitive. + type: array + items: + type: string + id: + description: ID of resource. + type: string + type: + description: Type determines how the reference will fetch the AWS resource. + type: string + enum: + - ID + - ARN + - Filters + x-kubernetes-validations: + - rule: 'has(self.type) && self.type == ''ID'' ? has(self.id) : !has(self.id)' + message: id is required when type is ID, and forbidden otherwise + - rule: 'has(self.type) && self.type == ''ARN'' ? has(self.arn) : !has(self.arn)' + message: arn is required when type is ARN, and forbidden otherwise + - rule: 'has(self.type) && self.type == ''Filters'' ? 
has(self.filters) : !has(self.filters)' + message: filters is required when type is Filters, and forbidden otherwise + azure: + description: Azure configures failure domain information for the Azure platform. + type: array + items: + description: AzureFailureDomain configures failure domain information for the Azure platform. + type: object + required: + - zone + properties: + subnet: + description: subnet is the name of the network subnet in which the VM will be created. When omitted, the subnet value from the machine providerSpec template will be used. + type: string + maxLength: 80 + pattern: ^[a-zA-Z0-9](?:[a-zA-Z0-9._-]*[a-zA-Z0-9_])?$ + zone: + description: Availability Zone for the virtual machine. If nil, the virtual machine should be deployed to no zone. + type: string + gcp: + description: GCP configures failure domain information for the GCP platform. + type: array + items: + description: GCPFailureDomain configures failure domain information for the GCP platform + type: object + required: + - zone + properties: + zone: + description: Zone is the zone in which the GCP machine provider will create the VM. + type: string + nutanix: + description: nutanix configures failure domain information for the Nutanix platform. + type: array + items: + description: NutanixFailureDomainReference refers to the failure domain of the Nutanix platform. + type: object + required: + - name + properties: + name: + description: name of the failure domain in which the nutanix machine provider will create the VM. Failure domains are defined in a cluster's config.openshift.io/Infrastructure resource. + type: string + maxLength: 64 + minLength: 1 + pattern: '[a-z0-9]([-a-z0-9]*[a-z0-9])?' + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + openstack: + description: OpenStack configures failure domain information for the OpenStack platform. + type: array + items: + description: OpenStackFailureDomain configures failure domain information for the OpenStack platform. + type: object + minProperties: 1 + properties: + availabilityZone: + description: 'availabilityZone is the nova availability zone in which the OpenStack machine provider will create the VM. If not specified, the VM will be created in the default availability zone specified in the nova configuration. Availability zone names must NOT contain : since it is used by admin users to specify hosts where instances are launched in server creation. Also, it must not contain spaces otherwise it will lead to node that belongs to this availability zone register failure, see kubernetes/cloud-provider-openstack#1379 for further information. The maximum length of availability zone name is 63 as per labels limits.' + type: string + maxLength: 63 + minLength: 1 + pattern: '^[^: ]*$' + rootVolume: + description: rootVolume contains settings that will be used by the OpenStack machine provider to create the root volume attached to the VM. If not specified, no root volume will be created. + type: object + required: + - volumeType + properties: + availabilityZone: + description: availabilityZone specifies the Cinder availability zone where the root volume will be created. If not specified, the root volume will be created in the availability zone specified by the volume type in the cinder configuration. If the volume type (configured in the OpenStack cluster) does not specify an availability zone, the root volume will be created in the default availability zone specified in the cinder configuration.
See https://docs.openstack.org/cinder/latest/admin/availability-zone-type.html for more details. If the OpenStack cluster is deployed with the cross_az_attach configuration option set to false, the root volume will have to be in the same availability zone as the VM (defined by OpenStackFailureDomain.AvailabilityZone). Availability zone names must NOT contain spaces otherwise it will lead to volume that belongs to this availability zone register failure, see kubernetes/cloud-provider-openstack#1379 for further information. The maximum length of availability zone name is 63 as per labels limits. + type: string + maxLength: 63 + minLength: 1 + pattern: ^[^ ]*$ + volumeType: + description: volumeType specifies the type of the root volume that will be provisioned. The maximum length of a volume type name is 255 characters, as per the OpenStack limit. + type: string + maxLength: 255 + minLength: 1 + x-kubernetes-validations: + - rule: '!has(self.availabilityZone) || !has(self.rootVolume) || has(self.rootVolume.availabilityZone)' + message: rootVolume.availabilityZone is required when availabilityZone is set + platform: + description: Platform identifies the platform for which the FailureDomain represents. Currently supported values are AWS, Azure, GCP, OpenStack, VSphere and Nutanix. + type: string + enum: + - "" + - AWS + - Azure + - BareMetal + - GCP + - Libvirt + - OpenStack + - None + - VSphere + - oVirt + - IBMCloud + - KubeVirt + - EquinixMetal + - PowerVS + - AlibabaCloud + - Nutanix + - External + vsphere: + description: vsphere configures failure domain information for the VSphere platform. + type: array + items: + description: VSphereFailureDomain configures failure domain information for the vSphere platform + type: object + required: + - name + properties: + name: + description: name of the failure domain in which the vSphere machine provider will create the VM. Failure domains are defined in a cluster's config.openshift.io/Infrastructure resource. When balancing machines across failure domains, the control plane machine set will inject configuration from the Infrastructure resource into the machine providerSpec to allocate the machine to a failure domain. + type: string + x-kubernetes-validations: + - rule: 'has(self.platform) && self.platform == ''VSphere'' ? has(self.vsphere) : !has(self.vsphere)' + message: vsphere configuration is required when platform is VSphere, and forbidden otherwise + - rule: 'has(self.platform) && self.platform == ''AWS'' ? has(self.aws) : !has(self.aws)' + message: aws configuration is required when platform is AWS, and forbidden otherwise + - rule: 'has(self.platform) && self.platform == ''Azure'' ? has(self.azure) : !has(self.azure)' + message: azure configuration is required when platform is Azure, and forbidden otherwise + - rule: 'has(self.platform) && self.platform == ''GCP'' ? has(self.gcp) : !has(self.gcp)' + message: gcp configuration is required when platform is GCP, and forbidden otherwise + - rule: 'has(self.platform) && self.platform == ''OpenStack'' ? has(self.openstack) : !has(self.openstack)' + message: openstack configuration is required when platform is OpenStack, and forbidden otherwise + - rule: 'has(self.platform) && self.platform == ''Nutanix'' ? 
has(self.nutanix) : !has(self.nutanix)' + message: nutanix configuration is required when platform is Nutanix, and forbidden otherwise + metadata: + description: 'ObjectMeta is the standard object metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata Labels are required to match the ControlPlaneMachineSet selector.' + type: object + required: + - labels + properties: + annotations: + description: 'Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' + type: object + additionalProperties: + type: string + labels: + description: 'Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels. This field must contain both the ''machine.openshift.io/cluster-api-machine-role'' and ''machine.openshift.io/cluster-api-machine-type'' labels, both with a value of ''master''. It must also contain a label with the key ''machine.openshift.io/cluster-api-cluster''.' + type: object + additionalProperties: + type: string + x-kubernetes-validations: + - rule: '''machine.openshift.io/cluster-api-machine-role'' in self && self[''machine.openshift.io/cluster-api-machine-role''] == ''master''' + message: label 'machine.openshift.io/cluster-api-machine-role' is required, and must have value 'master' + - rule: '''machine.openshift.io/cluster-api-machine-type'' in self && self[''machine.openshift.io/cluster-api-machine-type''] == ''master''' + message: label 'machine.openshift.io/cluster-api-machine-type' is required, and must have value 'master' + - rule: '''machine.openshift.io/cluster-api-cluster'' in self' + message: label 'machine.openshift.io/cluster-api-cluster' is required + spec: + description: Spec contains the desired configuration of the Control Plane Machines. The ProviderSpec within contains platform specific details for creating the Control Plane Machines. The ProviderSe should be complete apart from the platform specific failure domain field. This will be overriden when the Machines are created based on the FailureDomains field. + type: object + properties: + lifecycleHooks: + description: LifecycleHooks allow users to pause operations on the machine at certain predefined points within the machine lifecycle. + type: object + properties: + preDrain: + description: PreDrain hooks prevent the machine from being drained. This also blocks further lifecycle events, such as termination. + type: array + items: + description: LifecycleHook represents a single instance of a lifecycle hook + type: object + required: + - name + - owner + properties: + name: + description: Name defines a unique name for the lifcycle hook. The name should be unique and descriptive, ideally 1-3 words, in CamelCase or it may be namespaced, eg. foo.example.com/CamelCase. Names must be unique and should only be managed by a single entity. + type: string + maxLength: 256 + minLength: 3 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + owner: + description: Owner defines the owner of the lifecycle hook. This should be descriptive enough so that users can identify who/what is responsible for blocking the lifecycle. 
This could be the name of a controller (e.g. clusteroperator/etcd) or an administrator managing the hook. + type: string + maxLength: 512 + minLength: 3 + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + preTerminate: + description: PreTerminate hooks prevent the machine from being terminated. PreTerminate hooks be actioned after the Machine has been drained. + type: array + items: + description: LifecycleHook represents a single instance of a lifecycle hook + type: object + required: + - name + - owner + properties: + name: + description: Name defines a unique name for the lifcycle hook. The name should be unique and descriptive, ideally 1-3 words, in CamelCase or it may be namespaced, eg. foo.example.com/CamelCase. Names must be unique and should only be managed by a single entity. + type: string + maxLength: 256 + minLength: 3 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + owner: + description: Owner defines the owner of the lifecycle hook. This should be descriptive enough so that users can identify who/what is responsible for blocking the lifecycle. This could be the name of a controller (e.g. clusteroperator/etcd) or an administrator managing the hook. + type: string + maxLength: 512 + minLength: 3 + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + metadata: + description: ObjectMeta will autopopulate the Node created. Use this to indicate what labels, annotations, name prefix, etc., should be used when creating the Node. + type: object + properties: + annotations: + description: 'Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' + type: object + additionalProperties: + type: string + generateName: + description: "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. \n If this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header). \n Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency" + type: string + labels: + description: 'Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels' + type: object + additionalProperties: + type: string + name: + description: 'Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. 
More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + namespace: + description: "Namespace defines the space within each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty. \n Must be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces" + type: string + ownerReferences: + description: List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller. + type: array + items: + description: OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field. + type: object + required: + - apiVersion + - kind + - name + - uid + properties: + apiVersion: + description: API version of the referent. + type: string + blockOwnerDeletion: + description: If true, AND if the owner has the "foregroundDeletion" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion for how the garbage collector interacts with this field and enforces the foreground deletion. Defaults to false. To set this field, a user needs "delete" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned. + type: boolean + controller: + description: If true, this reference points to the managing controller. + type: boolean + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids' + type: string + providerID: + description: ProviderID is the identification ID of the machine provided by the provider. This field must match the provider ID as seen on the node object corresponding to this machine. This field is required by higher level consumers of cluster-api. Example use case is cluster autoscaler with cluster-api as provider. Clean-up logic in the autoscaler compares machines to nodes to find out machines at provider which could not get registered as Kubernetes nodes. With cluster-api as a generic out-of-tree provider for autoscaler, this field is required by autoscaler to be able to have a provider view of the list of machines. Another list of nodes is queried from the k8s apiserver and then a comparison is done to find out unregistered machines and are marked for delete. This field will be set by the actuators and consumed by higher level entities like autoscaler that will be interfacing with cluster-api as generic provider. + type: string + providerSpec: + description: ProviderSpec details Provider-specific configuration to use during node creation. 
+ type: object + properties: + value: + description: Value is an inlined, serialized representation of the resource configuration. It is recommended that providers maintain their own versioned API types that should be serialized/deserialized from this field, akin to component config. + type: object + x-kubernetes-preserve-unknown-fields: true + taints: + description: The list of the taints to be applied to the corresponding Node in additive manner. This list will not overwrite any other taints added to the Node on an ongoing basis by other entities. These taints should be actively reconciled e.g. if you ask the machine controller to apply a taint and then manually remove the taint the machine controller will put it back) but not have the machine controller remove any taints + type: array + items: + description: The node this Taint is attached to has the "effect" on any pod that does not tolerate the Taint. + type: object + required: + - effect + - key + properties: + effect: + description: Required. The effect of the taint on pods that do not tolerate the taint. Valid effects are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Required. The taint key to be applied to a node. + type: string + timeAdded: + description: TimeAdded represents the time at which the taint was added. It is only written for NoExecute taints. + type: string + format: date-time + value: + description: The taint value corresponding to the taint key. + type: string + x-kubernetes-validations: + - rule: 'has(self.machineType) && self.machineType == ''machines_v1beta1_machine_openshift_io'' ? has(self.machines_v1beta1_machine_openshift_io) : !has(self.machines_v1beta1_machine_openshift_io)' + message: machines_v1beta1_machine_openshift_io configuration is required when machineType is machines_v1beta1_machine_openshift_io, and forbidden otherwise + status: + description: ControlPlaneMachineSetStatus represents the status of the ControlPlaneMachineSet CRD. + type: object + properties: + conditions: + description: 'Conditions represents the observations of the ControlPlaneMachineSet''s current state. Known .status.conditions.type are: Available, Degraded and Progressing.' + type: array + items: + description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + type: object + required: + - lastTransitionTime + - message + - reason + - status + - type + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + type: string + format: date-time + message: + description: message is a human readable message indicating details about the transition. This may be an empty string. 
+ type: string + maxLength: 32768 + observedGeneration: + description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + type: integer + format: int64 + minimum: 0 + reason: + description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + type: string + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + status: + description: status of the condition, one of True, False, Unknown. + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + type: string + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: ObservedGeneration is the most recent generation observed for this ControlPlaneMachineSet. It corresponds to the ControlPlaneMachineSets's generation, which is updated on mutation by the API Server. + type: integer + format: int64 + readyReplicas: + description: ReadyReplicas is the number of Control Plane Machines created by the ControlPlaneMachineSet controller which are ready. Note that this value may be higher than the desired number of replicas while rolling updates are in-progress. + type: integer + format: int32 + replicas: + description: Replicas is the number of Control Plane Machines created by the ControlPlaneMachineSet controller. Note that during update operations this value may differ from the desired replica count. + type: integer + format: int32 + unavailableReplicas: + description: UnavailableReplicas is the number of Control Plane Machines that are still required before the ControlPlaneMachineSet reaches the desired available capacity. When this value is non-zero, the number of ReadyReplicas is less than the desired Replicas. + type: integer + format: int32 + updatedReplicas: + description: UpdatedReplicas is the number of non-terminated Control Plane Machines created by the ControlPlaneMachineSet controller that have the desired provider spec and are ready. This value is set to 0 when a change is detected to the desired spec. When the update strategy is RollingUpdate, this will also coincide with starting the process of updating the Machines. When the update strategy is OnDelete, this value will remain at 0 until a user deletes an existing replica and its replacement has become ready. 
+ type: integer + format: int32 + served: true + storage: true + subresources: + scale: + labelSelectorPath: .status.labelSelector + specReplicasPath: .spec.replicas + statusReplicasPath: .status.replicas + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/vendor/github.com/openshift/api/machine/v1/0000_10_controlplanemachineset.crd.yaml b/vendor/github.com/openshift/api/machine/v1/0000_10_controlplanemachineset-Default.crd.yaml similarity index 96% rename from vendor/github.com/openshift/api/machine/v1/0000_10_controlplanemachineset.crd.yaml rename to vendor/github.com/openshift/api/machine/v1/0000_10_controlplanemachineset-Default.crd.yaml index 97c0ae6009..906aca90d7 100644 --- a/vendor/github.com/openshift/api/machine/v1/0000_10_controlplanemachineset.crd.yaml +++ b/vendor/github.com/openshift/api/machine/v1/0000_10_controlplanemachineset-Default.crd.yaml @@ -6,6 +6,7 @@ metadata: api-approved.openshift.io: https://github.com/openshift/api/pull/1112 exclude.release.openshift.io/internal-openshift-hosted: "true" include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: Default creationTimestamp: null name: controlplanemachinesets.machine.openshift.io spec: @@ -249,6 +250,24 @@ spec: zone: description: Zone is the zone in which the GCP machine provider will create the VM. type: string + nutanix: + description: nutanix configures failure domain information for the Nutanix platform. + type: array + items: + description: NutanixFailureDomainReference refers to the failure domain of the Nutanix platform. + type: object + required: + - name + properties: + name: + description: name of the failure domain in which the nutanix machine provider will create the VM. Failure domains are defined in a cluster's config.openshift.io/Infrastructure resource. + type: string + maxLength: 64 + minLength: 1 + pattern: '[a-z0-9]([-a-z0-9]*[a-z0-9])?' + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map openstack: description: OpenStack configures failure domain information for the OpenStack platform. type: array @@ -284,7 +303,7 @@ spec: - rule: '!has(self.availabilityZone) || !has(self.rootVolume) || has(self.rootVolume.availabilityZone)' message: rootVolume.availabilityZone is required when availabilityZone is set platform: - description: Platform identifies the platform for which the FailureDomain represents. Currently supported values are AWS, Azure, and GCP. + description: Platform identifies the platform for which the FailureDomain represents. Currently supported values are AWS, Azure, GCP, OpenStack, VSphere and Nutanix. type: string enum: - "" @@ -313,6 +332,8 @@ spec: message: gcp configuration is required when platform is GCP, and forbidden otherwise - rule: 'has(self.platform) && self.platform == ''OpenStack'' ? has(self.openstack) : !has(self.openstack)' message: openstack configuration is required when platform is OpenStack, and forbidden otherwise + - rule: 'has(self.platform) && self.platform == ''Nutanix'' ? has(self.nutanix) : !has(self.nutanix)' + message: nutanix configuration is required when platform is Nutanix, and forbidden otherwise metadata: description: 'ObjectMeta is the standard object metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata Labels are required to match the ControlPlaneMachineSet selector.' 
type: object diff --git a/vendor/github.com/openshift/api/machine/v1/0000_10_controlplanemachineset-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/machine/v1/0000_10_controlplanemachineset-TechPreviewNoUpgrade.crd.yaml new file mode 100644 index 0000000000..442ae307a2 --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1/0000_10_controlplanemachineset-TechPreviewNoUpgrade.crd.yaml @@ -0,0 +1,604 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1112 + capability.openshift.io/name: MachineAPI + exclude.release.openshift.io/internal-openshift-hosted: "true" + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: TechPreviewNoUpgrade + creationTimestamp: null + name: controlplanemachinesets.machine.openshift.io +spec: + group: machine.openshift.io + names: + kind: ControlPlaneMachineSet + listKind: ControlPlaneMachineSetList + plural: controlplanemachinesets + singular: controlplanemachineset + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Desired Replicas + jsonPath: .spec.replicas + name: Desired + type: integer + - description: Current Replicas + jsonPath: .status.replicas + name: Current + type: integer + - description: Ready Replicas + jsonPath: .status.readyReplicas + name: Ready + type: integer + - description: Updated Replicas + jsonPath: .status.updatedReplicas + name: Updated + type: integer + - description: Observed number of unavailable replicas + jsonPath: .status.unavailableReplicas + name: Unavailable + type: integer + - description: ControlPlaneMachineSet state + jsonPath: .spec.state + name: State + type: string + - description: ControlPlaneMachineSet age + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: 'ControlPlaneMachineSet ensures that a specified number of control plane machine replicas are running at any given time. Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).' + type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ControlPlaneMachineSet represents the configuration of the ControlPlaneMachineSet. + type: object + required: + - replicas + - selector + - template + properties: + replicas: + description: Replicas defines how many Control Plane Machines should be created by this ControlPlaneMachineSet. This field is immutable and cannot be changed after cluster installation. The ControlPlaneMachineSet only operates with 3 or 5 node control planes, 3 and 5 are the only valid values for this field. 
+ type: integer + format: int32 + default: 3 + enum: + - 3 + - 5 + x-kubernetes-validations: + - rule: self == oldSelf + message: replicas is immutable + selector: + description: Label selector for Machines. Existing Machines selected by this selector will be the ones affected by this ControlPlaneMachineSet. It must match the template's labels. This field is considered immutable after creation of the resource. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + x-kubernetes-validations: + - rule: self == oldSelf + message: selector is immutable + state: + description: State defines whether the ControlPlaneMachineSet is Active or Inactive. When Inactive, the ControlPlaneMachineSet will not take any action on the state of the Machines within the cluster. When Active, the ControlPlaneMachineSet will reconcile the Machines and will update the Machines as necessary. Once Active, a ControlPlaneMachineSet cannot be made Inactive. To prevent further action please remove the ControlPlaneMachineSet. + type: string + default: Inactive + enum: + - Active + - Inactive + x-kubernetes-validations: + - rule: oldSelf != 'Active' || self == oldSelf + message: state cannot be changed once Active + strategy: + description: Strategy defines how the ControlPlaneMachineSet will update Machines when it detects a change to the ProviderSpec. + type: object + default: + type: RollingUpdate + properties: + type: + description: Type defines the type of update strategy that should be used when updating Machines owned by the ControlPlaneMachineSet. Valid values are "RollingUpdate" and "OnDelete". The current default value is "RollingUpdate". + type: string + default: RollingUpdate + enum: + - RollingUpdate + - OnDelete + template: + description: Template describes the Control Plane Machines that will be created by this ControlPlaneMachineSet. + type: object + required: + - machineType + properties: + machineType: + description: MachineType determines the type of Machines that should be managed by the ControlPlaneMachineSet. Currently, the only valid value is machines_v1beta1_machine_openshift_io. 
+ type: string + enum: + - machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + description: OpenShiftMachineV1Beta1Machine defines the template for creating Machines from the v1beta1.machine.openshift.io API group. + type: object + required: + - metadata + - spec + properties: + failureDomains: + description: FailureDomains is the list of failure domains (sometimes called availability zones) in which the ControlPlaneMachineSet should balance the Control Plane Machines. This will be merged into the ProviderSpec given in the template. This field is optional on platforms that do not require placement information. + type: object + required: + - platform + properties: + aws: + description: AWS configures failure domain information for the AWS platform. + type: array + items: + description: AWSFailureDomain configures failure domain information for the AWS platform. + type: object + minProperties: 1 + properties: + placement: + description: Placement configures the placement information for this instance. + type: object + required: + - availabilityZone + properties: + availabilityZone: + description: AvailabilityZone is the availability zone of the instance. + type: string + subnet: + description: Subnet is a reference to the subnet to use for this instance. + type: object + required: + - type + properties: + arn: + description: ARN of resource. + type: string + filters: + description: Filters is a set of filters used to identify a resource. + type: array + items: + description: AWSResourceFilter is a filter used to identify an AWS resource + type: object + required: + - name + properties: + name: + description: Name of the filter. Filter names are case-sensitive. + type: string + values: + description: Values includes one or more filter values. Filter values are case-sensitive. + type: array + items: + type: string + id: + description: ID of resource. + type: string + type: + description: Type determines how the reference will fetch the AWS resource. + type: string + enum: + - ID + - ARN + - Filters + x-kubernetes-validations: + - rule: 'has(self.type) && self.type == ''ID'' ? has(self.id) : !has(self.id)' + message: id is required when type is ID, and forbidden otherwise + - rule: 'has(self.type) && self.type == ''ARN'' ? has(self.arn) : !has(self.arn)' + message: arn is required when type is ARN, and forbidden otherwise + - rule: 'has(self.type) && self.type == ''Filters'' ? has(self.filters) : !has(self.filters)' + message: filters is required when type is Filters, and forbidden otherwise + azure: + description: Azure configures failure domain information for the Azure platform. + type: array + items: + description: AzureFailureDomain configures failure domain information for the Azure platform. + type: object + required: + - zone + properties: + subnet: + description: subnet is the name of the network subnet in which the VM will be created. When omitted, the subnet value from the machine providerSpec template will be used. + type: string + maxLength: 80 + pattern: ^[a-zA-Z0-9](?:[a-zA-Z0-9._-]*[a-zA-Z0-9_])?$ + zone: + description: Availability Zone for the virtual machine. If nil, the virtual machine should be deployed to no zone. + type: string + gcp: + description: GCP configures failure domain information for the GCP platform. 
+ type: array + items: + description: GCPFailureDomain configures failure domain information for the GCP platform + type: object + required: + - zone + properties: + zone: + description: Zone is the zone in which the GCP machine provider will create the VM. + type: string + nutanix: + description: nutanix configures failure domain information for the Nutanix platform. + type: array + items: + description: NutanixFailureDomainReference refers to the failure domain of the Nutanix platform. + type: object + required: + - name + properties: + name: + description: name of the failure domain in which the nutanix machine provider will create the VM. Failure domains are defined in a cluster's config.openshift.io/Infrastructure resource. + type: string + maxLength: 64 + minLength: 1 + pattern: '[a-z0-9]([-a-z0-9]*[a-z0-9])?' + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + openstack: + description: OpenStack configures failure domain information for the OpenStack platform. + type: array + items: + description: OpenStackFailureDomain configures failure domain information for the OpenStack platform. + type: object + minProperties: 1 + properties: + availabilityZone: + description: 'availabilityZone is the nova availability zone in which the OpenStack machine provider will create the VM. If not specified, the VM will be created in the default availability zone specified in the nova configuration. Availability zone names must NOT contain : since it is used by admin users to specify hosts where instances are launched in server creation. Also, it must not contain spaces otherwise it will lead to node that belongs to this availability zone register failure, see kubernetes/cloud-provider-openstack#1379 for further information. The maximum length of availability zone name is 63 as per labels limits.' + type: string + maxLength: 63 + minLength: 1 + pattern: '^[^: ]*$' + rootVolume: + description: rootVolume contains settings that will be used by the OpenStack machine provider to create the root volume attached to the VM. If not specified, no root volume will be created. + type: object + required: + - volumeType + properties: + availabilityZone: + description: availabilityZone specifies the Cinder availability zone where the root volume will be created. If not specifified, the root volume will be created in the availability zone specified by the volume type in the cinder configuration. If the volume type (configured in the OpenStack cluster) does not specify an availability zone, the root volume will be created in the default availability zone specified in the cinder configuration. See https://docs.openstack.org/cinder/latest/admin/availability-zone-type.html for more details. If the OpenStack cluster is deployed with the cross_az_attach configuration option set to false, the root volume will have to be in the same availability zone as the VM (defined by OpenStackFailureDomain.AvailabilityZone). Availability zone names must NOT contain spaces otherwise it will lead to volume that belongs to this availability zone register failure, see kubernetes/cloud-provider-openstack#1379 for further information. The maximum length of availability zone name is 63 as per labels limits. + type: string + maxLength: 63 + minLength: 1 + pattern: ^[^ ]*$ + volumeType: + description: volumeType specifies the type of the root volume that will be provisioned. The maximum length of a volume type name is 255 characters, as per the OpenStack limit. 
+ type: string + maxLength: 255 + minLength: 1 + x-kubernetes-validations: + - rule: '!has(self.availabilityZone) || !has(self.rootVolume) || has(self.rootVolume.availabilityZone)' + message: rootVolume.availabilityZone is required when availabilityZone is set + platform: + description: Platform identifies the platform for which the FailureDomain represents. Currently supported values are AWS, Azure, GCP, OpenStack, VSphere and Nutanix. + type: string + enum: + - "" + - AWS + - Azure + - BareMetal + - GCP + - Libvirt + - OpenStack + - None + - VSphere + - oVirt + - IBMCloud + - KubeVirt + - EquinixMetal + - PowerVS + - AlibabaCloud + - Nutanix + - External + vsphere: + description: vsphere configures failure domain information for the VSphere platform. + type: array + items: + description: VSphereFailureDomain configures failure domain information for the vSphere platform + type: object + required: + - name + properties: + name: + description: name of the failure domain in which the vSphere machine provider will create the VM. Failure domains are defined in a cluster's config.openshift.io/Infrastructure resource. When balancing machines across failure domains, the control plane machine set will inject configuration from the Infrastructure resource into the machine providerSpec to allocate the machine to a failure domain. + type: string + x-kubernetes-validations: + - rule: 'has(self.platform) && self.platform == ''VSphere'' ? has(self.vsphere) : !has(self.vsphere)' + message: vsphere configuration is required when platform is VSphere, and forbidden otherwise + - rule: 'has(self.platform) && self.platform == ''AWS'' ? has(self.aws) : !has(self.aws)' + message: aws configuration is required when platform is AWS, and forbidden otherwise + - rule: 'has(self.platform) && self.platform == ''Azure'' ? has(self.azure) : !has(self.azure)' + message: azure configuration is required when platform is Azure, and forbidden otherwise + - rule: 'has(self.platform) && self.platform == ''GCP'' ? has(self.gcp) : !has(self.gcp)' + message: gcp configuration is required when platform is GCP, and forbidden otherwise + - rule: 'has(self.platform) && self.platform == ''OpenStack'' ? has(self.openstack) : !has(self.openstack)' + message: openstack configuration is required when platform is OpenStack, and forbidden otherwise + - rule: 'has(self.platform) && self.platform == ''Nutanix'' ? has(self.nutanix) : !has(self.nutanix)' + message: nutanix configuration is required when platform is Nutanix, and forbidden otherwise + metadata: + description: 'ObjectMeta is the standard object metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata Labels are required to match the ControlPlaneMachineSet selector.' + type: object + required: + - labels + properties: + annotations: + description: 'Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' + type: object + additionalProperties: + type: string + labels: + description: 'Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels. 
This field must contain both the ''machine.openshift.io/cluster-api-machine-role'' and ''machine.openshift.io/cluster-api-machine-type'' labels, both with a value of ''master''. It must also contain a label with the key ''machine.openshift.io/cluster-api-cluster''.' + type: object + additionalProperties: + type: string + x-kubernetes-validations: + - rule: '''machine.openshift.io/cluster-api-machine-role'' in self && self[''machine.openshift.io/cluster-api-machine-role''] == ''master''' + message: label 'machine.openshift.io/cluster-api-machine-role' is required, and must have value 'master' + - rule: '''machine.openshift.io/cluster-api-machine-type'' in self && self[''machine.openshift.io/cluster-api-machine-type''] == ''master''' + message: label 'machine.openshift.io/cluster-api-machine-type' is required, and must have value 'master' + - rule: '''machine.openshift.io/cluster-api-cluster'' in self' + message: label 'machine.openshift.io/cluster-api-cluster' is required + spec: + description: Spec contains the desired configuration of the Control Plane Machines. The ProviderSpec within contains platform specific details for creating the Control Plane Machines. The ProviderSe should be complete apart from the platform specific failure domain field. This will be overriden when the Machines are created based on the FailureDomains field. + type: object + properties: + lifecycleHooks: + description: LifecycleHooks allow users to pause operations on the machine at certain predefined points within the machine lifecycle. + type: object + properties: + preDrain: + description: PreDrain hooks prevent the machine from being drained. This also blocks further lifecycle events, such as termination. + type: array + items: + description: LifecycleHook represents a single instance of a lifecycle hook + type: object + required: + - name + - owner + properties: + name: + description: Name defines a unique name for the lifcycle hook. The name should be unique and descriptive, ideally 1-3 words, in CamelCase or it may be namespaced, eg. foo.example.com/CamelCase. Names must be unique and should only be managed by a single entity. + type: string + maxLength: 256 + minLength: 3 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + owner: + description: Owner defines the owner of the lifecycle hook. This should be descriptive enough so that users can identify who/what is responsible for blocking the lifecycle. This could be the name of a controller (e.g. clusteroperator/etcd) or an administrator managing the hook. + type: string + maxLength: 512 + minLength: 3 + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + preTerminate: + description: PreTerminate hooks prevent the machine from being terminated. PreTerminate hooks be actioned after the Machine has been drained. + type: array + items: + description: LifecycleHook represents a single instance of a lifecycle hook + type: object + required: + - name + - owner + properties: + name: + description: Name defines a unique name for the lifcycle hook. The name should be unique and descriptive, ideally 1-3 words, in CamelCase or it may be namespaced, eg. foo.example.com/CamelCase. Names must be unique and should only be managed by a single entity. 
+ type: string + maxLength: 256 + minLength: 3 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + owner: + description: Owner defines the owner of the lifecycle hook. This should be descriptive enough so that users can identify who/what is responsible for blocking the lifecycle. This could be the name of a controller (e.g. clusteroperator/etcd) or an administrator managing the hook. + type: string + maxLength: 512 + minLength: 3 + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + metadata: + description: ObjectMeta will autopopulate the Node created. Use this to indicate what labels, annotations, name prefix, etc., should be used when creating the Node. + type: object + properties: + annotations: + description: 'Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' + type: object + additionalProperties: + type: string + generateName: + description: "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. \n If this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header). \n Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency" + type: string + labels: + description: 'Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels' + type: object + additionalProperties: + type: string + name: + description: 'Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + namespace: + description: "Namespace defines the space within each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty. \n Must be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces" + type: string + ownerReferences: + description: List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller. + type: array + items: + description: OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field. + type: object + required: + - apiVersion + - kind + - name + - uid + properties: + apiVersion: + description: API version of the referent. + type: string + blockOwnerDeletion: + description: If true, AND if the owner has the "foregroundDeletion" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion for how the garbage collector interacts with this field and enforces the foreground deletion. Defaults to false. To set this field, a user needs "delete" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned. + type: boolean + controller: + description: If true, this reference points to the managing controller. + type: boolean + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids' + type: string + providerID: + description: ProviderID is the identification ID of the machine provided by the provider. This field must match the provider ID as seen on the node object corresponding to this machine. This field is required by higher level consumers of cluster-api. Example use case is cluster autoscaler with cluster-api as provider. Clean-up logic in the autoscaler compares machines to nodes to find out machines at provider which could not get registered as Kubernetes nodes. With cluster-api as a generic out-of-tree provider for autoscaler, this field is required by autoscaler to be able to have a provider view of the list of machines. Another list of nodes is queried from the k8s apiserver and then a comparison is done to find out unregistered machines and are marked for delete. This field will be set by the actuators and consumed by higher level entities like autoscaler that will be interfacing with cluster-api as generic provider. + type: string + providerSpec: + description: ProviderSpec details Provider-specific configuration to use during node creation. + type: object + properties: + value: + description: Value is an inlined, serialized representation of the resource configuration. It is recommended that providers maintain their own versioned API types that should be serialized/deserialized from this field, akin to component config. + type: object + x-kubernetes-preserve-unknown-fields: true + taints: + description: The list of the taints to be applied to the corresponding Node in additive manner. This list will not overwrite any other taints added to the Node on an ongoing basis by other entities. These taints should be actively reconciled e.g. if you ask the machine controller to apply a taint and then manually remove the taint the machine controller will put it back) but not have the machine controller remove any taints + type: array + items: + description: The node this Taint is attached to has the "effect" on any pod that does not tolerate the Taint. 
+ type: object + required: + - effect + - key + properties: + effect: + description: Required. The effect of the taint on pods that do not tolerate the taint. Valid effects are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Required. The taint key to be applied to a node. + type: string + timeAdded: + description: TimeAdded represents the time at which the taint was added. It is only written for NoExecute taints. + type: string + format: date-time + value: + description: The taint value corresponding to the taint key. + type: string + x-kubernetes-validations: + - rule: 'has(self.machineType) && self.machineType == ''machines_v1beta1_machine_openshift_io'' ? has(self.machines_v1beta1_machine_openshift_io) : !has(self.machines_v1beta1_machine_openshift_io)' + message: machines_v1beta1_machine_openshift_io configuration is required when machineType is machines_v1beta1_machine_openshift_io, and forbidden otherwise + status: + description: ControlPlaneMachineSetStatus represents the status of the ControlPlaneMachineSet CRD. + type: object + properties: + conditions: + description: 'Conditions represents the observations of the ControlPlaneMachineSet''s current state. Known .status.conditions.type are: Available, Degraded and Progressing.' + type: array + items: + description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + type: object + required: + - lastTransitionTime + - message + - reason + - status + - type + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + type: string + format: date-time + message: + description: message is a human readable message indicating details about the transition. This may be an empty string. + type: string + maxLength: 32768 + observedGeneration: + description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + type: integer + format: int64 + minimum: 0 + reason: + description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + type: string + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + status: + description: status of the condition, one of True, False, Unknown. + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
--- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + type: string + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: ObservedGeneration is the most recent generation observed for this ControlPlaneMachineSet. It corresponds to the ControlPlaneMachineSets's generation, which is updated on mutation by the API Server. + type: integer + format: int64 + readyReplicas: + description: ReadyReplicas is the number of Control Plane Machines created by the ControlPlaneMachineSet controller which are ready. Note that this value may be higher than the desired number of replicas while rolling updates are in-progress. + type: integer + format: int32 + replicas: + description: Replicas is the number of Control Plane Machines created by the ControlPlaneMachineSet controller. Note that during update operations this value may differ from the desired replica count. + type: integer + format: int32 + unavailableReplicas: + description: UnavailableReplicas is the number of Control Plane Machines that are still required before the ControlPlaneMachineSet reaches the desired available capacity. When this value is non-zero, the number of ReadyReplicas is less than the desired Replicas. + type: integer + format: int32 + updatedReplicas: + description: UpdatedReplicas is the number of non-terminated Control Plane Machines created by the ControlPlaneMachineSet controller that have the desired provider spec and are ready. This value is set to 0 when a change is detected to the desired spec. When the update strategy is RollingUpdate, this will also coincide with starting the process of updating the Machines. When the update strategy is OnDelete, this value will remain at 0 until a user deletes an existing replica and its replacement has become ready. 
+ type: integer + format: int32 + served: true + storage: true + subresources: + scale: + labelSelectorPath: .status.labelSelector + specReplicasPath: .spec.replicas + statusReplicasPath: .status.replicas + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/vendor/github.com/openshift/api/machine/v1/custom.controlplanemachineset.testsuite.yaml b/vendor/github.com/openshift/api/machine/v1/custom.controlplanemachineset.testsuite.yaml new file mode 100644 index 0000000000..350be2bf31 --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1/custom.controlplanemachineset.testsuite.yaml @@ -0,0 +1,50 @@ +apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: "[CustomNoUpgrade] ControlPlaneMachineSet" +crd: 0000_10_controlplanemachineset-CustomNoUpgrade.crd.yaml +tests: + onCreate: + - name: Should reject a VSphere platform failure domain without any VSphere config + initial: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + failureDomains: + platform: VSphere + expectedError: "spec.template.machines_v1beta1_machine_openshift_io.failureDomains: Invalid value: \"object\": vsphere configuration is required when platform is VSphere" + - name: Should reject a VSphere configured failure domain without a platform type + initial: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + failureDomains: + vsphere: + - name: foo + expectedError: "spec.template.machines_v1beta1_machine_openshift_io.failureDomains.platform: Required value" diff --git a/vendor/github.com/openshift/api/machine/v1/stable.controlplanemachineset.aws.testsuite.yaml b/vendor/github.com/openshift/api/machine/v1/stable.controlplanemachineset.aws.testsuite.yaml index 07a5ec7c13..f599fcc2db 100644 --- a/vendor/github.com/openshift/api/machine/v1/stable.controlplanemachineset.aws.testsuite.yaml +++ b/vendor/github.com/openshift/api/machine/v1/stable.controlplanemachineset.aws.testsuite.yaml @@ -1,6 +1,6 @@ apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this name: "[Stable] ControlPlaneMachineSet (AWS)" -crd: 0000_10_controlplanemachineset.crd.yaml +crd: 0000_10_controlplanemachineset-Default.crd.yaml tests: onCreate: - name: Should reject an AWS platform failure domain without any AWS config diff --git a/vendor/github.com/openshift/api/machine/v1/stable.controlplanemachineset.azure.testsuite.yaml b/vendor/github.com/openshift/api/machine/v1/stable.controlplanemachineset.azure.testsuite.yaml index 191bf65f2e..6b6295e536 
100644 --- a/vendor/github.com/openshift/api/machine/v1/stable.controlplanemachineset.azure.testsuite.yaml +++ b/vendor/github.com/openshift/api/machine/v1/stable.controlplanemachineset.azure.testsuite.yaml @@ -1,6 +1,6 @@ apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this name: "[Stable] ControlPlaneMachineSet" -crd: 0000_10_controlplanemachineset.crd.yaml +crd: 0000_10_controlplanemachineset-Default.crd.yaml tests: onCreate: - name: Should reject an Azure platform failure domain without any Azure config diff --git a/vendor/github.com/openshift/api/machine/v1/stable.controlplanemachineset.gcp.testsuite.yaml b/vendor/github.com/openshift/api/machine/v1/stable.controlplanemachineset.gcp.testsuite.yaml index 518625f915..24e617286c 100644 --- a/vendor/github.com/openshift/api/machine/v1/stable.controlplanemachineset.gcp.testsuite.yaml +++ b/vendor/github.com/openshift/api/machine/v1/stable.controlplanemachineset.gcp.testsuite.yaml @@ -1,6 +1,6 @@ apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this name: "[Stable] ControlPlaneMachineSet" -crd: 0000_10_controlplanemachineset.crd.yaml +crd: 0000_10_controlplanemachineset-Default.crd.yaml tests: onCreate: - name: Should reject an GCP platform failure domain without any GCP config diff --git a/vendor/github.com/openshift/api/machine/v1/stable.controlplanemachineset.openstack.testsuite.yaml b/vendor/github.com/openshift/api/machine/v1/stable.controlplanemachineset.openstack.testsuite.yaml index a09de51e0f..ae65d8f7f4 100644 --- a/vendor/github.com/openshift/api/machine/v1/stable.controlplanemachineset.openstack.testsuite.yaml +++ b/vendor/github.com/openshift/api/machine/v1/stable.controlplanemachineset.openstack.testsuite.yaml @@ -1,6 +1,6 @@ apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this name: "[Stable] ControlPlaneMachineSet" -crd: 0000_10_controlplanemachineset.crd.yaml +crd: 0000_10_controlplanemachineset-Default.crd.yaml tests: onCreate: - name: Should reject an OpenStack platform failure domain without any OpenStack config diff --git a/vendor/github.com/openshift/api/machine/v1/stable.controlplanemachineset.testsuite.yaml b/vendor/github.com/openshift/api/machine/v1/stable.controlplanemachineset.testsuite.yaml index 3e65b31f64..dd4f09ccf6 100644 --- a/vendor/github.com/openshift/api/machine/v1/stable.controlplanemachineset.testsuite.yaml +++ b/vendor/github.com/openshift/api/machine/v1/stable.controlplanemachineset.testsuite.yaml @@ -1,6 +1,6 @@ apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this name: "[Stable] ControlPlaneMachineSet" -crd: 0000_10_controlplanemachineset.crd.yaml +crd: 0000_10_controlplanemachineset-Default.crd.yaml tests: onCreate: - name: Should be able to create a minimal ControlPlaneMachineSet diff --git a/vendor/github.com/openshift/api/machine/v1/techpreview.controlplanemachineset.testsuite.yaml b/vendor/github.com/openshift/api/machine/v1/techpreview.controlplanemachineset.testsuite.yaml new file mode 100644 index 0000000000..519b0cad2c --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1/techpreview.controlplanemachineset.testsuite.yaml @@ -0,0 +1,50 @@ +apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: "[TechPreviewNoUpgrade] ControlPlaneMachineSet" +crd: 0000_10_controlplanemachineset-TechPreviewNoUpgrade.crd.yaml +tests: + onCreate: + - name: Should reject an 
VSphere platform failure domain without any VSphere config + initial: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + failureDomains: + platform: VSphere + expectedError: "spec.template.machines_v1beta1_machine_openshift_io.failureDomains: Invalid value: \"object\": vsphere configuration is required when platform is VSphere" + - name: Should reject an VSphere configured failure domain without a platform type + initial: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + failureDomains: + vsphere: + - name: foo + expectedError: "spec.template.machines_v1beta1_machine_openshift_io.failureDomains.platform: Required value" diff --git a/vendor/github.com/openshift/api/machine/v1/types_controlplanemachineset.go b/vendor/github.com/openshift/api/machine/v1/types_controlplanemachineset.go index e46c39b552..42e2b83fa1 100644 --- a/vendor/github.com/openshift/api/machine/v1/types_controlplanemachineset.go +++ b/vendor/github.com/openshift/api/machine/v1/types_controlplanemachineset.go @@ -138,7 +138,7 @@ type OpenShiftMachineV1Beta1MachineTemplate struct { // This will be merged into the ProviderSpec given in the template. // This field is optional on platforms that do not require placement information. // +optional - FailureDomains FailureDomains `json:"failureDomains,omitempty"` + FailureDomains *FailureDomains `json:"failureDomains,omitempty"` // ObjectMeta is the standard object metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata @@ -231,9 +231,11 @@ const ( // +kubebuilder:validation:XValidation:rule="has(self.platform) && self.platform == 'Azure' ? has(self.azure) : !has(self.azure)",message="azure configuration is required when platform is Azure, and forbidden otherwise" // +kubebuilder:validation:XValidation:rule="has(self.platform) && self.platform == 'GCP' ? has(self.gcp) : !has(self.gcp)",message="gcp configuration is required when platform is GCP, and forbidden otherwise" // +kubebuilder:validation:XValidation:rule="has(self.platform) && self.platform == 'OpenStack' ? has(self.openstack) : !has(self.openstack)",message="openstack configuration is required when platform is OpenStack, and forbidden otherwise" +// +openshift:validation:FeatureSetAwareXValidation:featureSet=CustomNoUpgrade;TechPreviewNoUpgrade,rule="has(self.platform) && self.platform == 'VSphere' ? 
has(self.vsphere) : !has(self.vsphere)",message="vsphere configuration is required when platform is VSphere, and forbidden otherwise" +// +kubebuilder:validation:XValidation:rule="has(self.platform) && self.platform == 'Nutanix' ? has(self.nutanix) : !has(self.nutanix)",message="nutanix configuration is required when platform is Nutanix, and forbidden otherwise" type FailureDomains struct { // Platform identifies the platform for which the FailureDomain represents. - // Currently supported values are AWS, Azure, and GCP. + // Currently supported values are AWS, Azure, GCP, OpenStack, VSphere and Nutanix. // +unionDiscriminator // +kubebuilder:validation:Required Platform configv1.PlatformType `json:"platform"` @@ -250,6 +252,11 @@ type FailureDomains struct { // +optional GCP *[]GCPFailureDomain `json:"gcp,omitempty"` + // vsphere configures failure domain information for the VSphere platform. + // +optional + // +openshift:enable:FeatureSets=CustomNoUpgrade;TechPreviewNoUpgrade + VSphere []VSphereFailureDomain `json:"vsphere,omitempty"` + // OpenStack configures failure domain information for the OpenStack platform. // +optional // @@ -260,6 +267,12 @@ type FailureDomains struct { // + of nil if it would be a pointer. // +optional OpenStack []OpenStackFailureDomain `json:"openstack,omitempty"` + + // nutanix configures failure domain information for the Nutanix platform. + // +listType=map + // +listMapKey=name + // +optional + Nutanix []NutanixFailureDomainReference `json:"nutanix,omitempty"` } // AWSFailureDomain configures failure domain information for the AWS platform. @@ -303,6 +316,16 @@ type GCPFailureDomain struct { Zone string `json:"zone"` } +// VSphereFailureDomain configures failure domain information for the vSphere platform +type VSphereFailureDomain struct { + // name of the failure domain in which the vSphere machine provider will create the VM. + // Failure domains are defined in a cluster's config.openshift.io/Infrastructure resource. + // When balancing machines across failure domains, the control plane machine set will inject configuration from the + // Infrastructure resource into the machine providerSpec to allocate the machine to a failure domain. + // +kubebuilder:validation:Required + Name string `json:"name"` +} + // OpenStackFailureDomain configures failure domain information for the OpenStack platform. // +kubebuilder:validation:MinProperties:=1 // +kubebuilder:validation:XValidation:rule="!has(self.availabilityZone) || !has(self.rootVolume) || has(self.rootVolume.availabilityZone)",message="rootVolume.availabilityZone is required when availabilityZone is set" @@ -329,6 +352,17 @@ type OpenStackFailureDomain struct { RootVolume *RootVolume `json:"rootVolume,omitempty"` } +// NutanixFailureDomainReference refers to the failure domain of the Nutanix platform. +type NutanixFailureDomainReference struct { + // name of the failure domain in which the nutanix machine provider will create the VM. + // Failure domains are defined in a cluster's config.openshift.io/Infrastructure resource. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=64 + // +kubebuilder:validation:Pattern=`[a-z0-9]([-a-z0-9]*[a-z0-9])?` + Name string `json:"name"` +} + // RootVolume represents the volume metadata to boot from. // The original RootVolume struct is defined in the v1alpha1 but it's not best practice to use it directly here so we define a new one // that should stay in sync with the original one. 
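For context on the API change above (this note and the sketch below are editorial, not part of the vendored diff): the FailureDomains union gains optional vsphere and nutanix members, with the VSphere member and its CEL rule gated behind the CustomNoUpgrade/TechPreviewNoUpgrade feature sets, and the new XValidation rules require the matching member to be set exactly when the platform discriminator names that platform. A minimal Go sketch of how code in this repository might populate the new VSphere member and mirror that rule locally; the validateFailureDomains helper and the failure-domain name "fd-1" are illustrative assumptions, not part of openshift/api:

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
	machinev1 "github.com/openshift/api/machine/v1"
)

// validateFailureDomains loosely mirrors the new CEL rule:
// "vsphere configuration is required when platform is VSphere, and forbidden otherwise".
// It is an illustrative client-side check, not the API server's validation.
func validateFailureDomains(fd machinev1.FailureDomains) error {
	hasVSphere := len(fd.VSphere) > 0
	switch {
	case fd.Platform == configv1.VSpherePlatformType && !hasVSphere:
		return fmt.Errorf("vsphere configuration is required when platform is VSphere")
	case fd.Platform != configv1.VSpherePlatformType && hasVSphere:
		return fmt.Errorf("vsphere configuration is forbidden when platform is not VSphere")
	}
	return nil
}

func main() {
	// Populate the union the way the CRD expects: discriminator plus the matching member.
	fd := machinev1.FailureDomains{
		Platform: configv1.VSpherePlatformType,
		VSphere: []machinev1.VSphereFailureDomain{
			// The name must refer to a failure domain defined in the cluster's
			// config.openshift.io/Infrastructure resource; "fd-1" is a made-up example.
			{Name: "fd-1"},
		},
	}
	fmt.Println(validateFailureDomains(fd)) // prints <nil> when the union is consistent
}

The nutanix member follows the same pattern, with NutanixFailureDomainReference entries keyed by name, and the same kind of "required when platform is Nutanix, forbidden otherwise" rule shown in the diff above.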
diff --git a/vendor/github.com/openshift/api/machine/v1/types_nutanixprovider.go b/vendor/github.com/openshift/api/machine/v1/types_nutanixprovider.go index fc7db6be67..1370ebdd28 100644 --- a/vendor/github.com/openshift/api/machine/v1/types_nutanixprovider.go +++ b/vendor/github.com/openshift/api/machine/v1/types_nutanixprovider.go @@ -86,6 +86,14 @@ type NutanixMachineProviderConfig struct { // credentials data to access Nutanix PC client // +kubebuilder:validation:Required CredentialsSecret *corev1.LocalObjectReference `json:"credentialsSecret"` + + // failureDomain refers to the name of the FailureDomain with which this Machine is associated. + // If this is configured, the Nutanix machine controller will use the prism_central endpoint + // and credentials defined in the referenced FailureDomain to communicate to the prism_central. + // It will also verify that the 'cluster' and subnets' configuration in the NutanixMachineProviderConfig + // is consistent with that in the referenced failureDomain. + // +optional + FailureDomain *NutanixFailureDomainReference `json:"failureDomain"` } // NutanixCategory identifies a pair of prism category key and value diff --git a/vendor/github.com/openshift/api/machine/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/machine/v1/zz_generated.deepcopy.go index ca31843272..ffd8e951f2 100644 --- a/vendor/github.com/openshift/api/machine/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/machine/v1/zz_generated.deepcopy.go @@ -530,6 +530,11 @@ func (in *FailureDomains) DeepCopyInto(out *FailureDomains) { copy(*out, *in) } } + if in.VSphere != nil { + in, out := &in.VSphere, &out.VSphere + *out = make([]VSphereFailureDomain, len(*in)) + copy(*out, *in) + } if in.OpenStack != nil { in, out := &in.OpenStack, &out.OpenStack *out = make([]OpenStackFailureDomain, len(*in)) @@ -537,6 +542,11 @@ func (in *FailureDomains) DeepCopyInto(out *FailureDomains) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.Nutanix != nil { + in, out := &in.Nutanix, &out.Nutanix + *out = make([]NutanixFailureDomainReference, len(*in)) + copy(*out, *in) + } return } @@ -598,6 +608,22 @@ func (in *NutanixCategory) DeepCopy() *NutanixCategory { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NutanixFailureDomainReference) DeepCopyInto(out *NutanixFailureDomainReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NutanixFailureDomainReference. +func (in *NutanixFailureDomainReference) DeepCopy() *NutanixFailureDomainReference { + if in == nil { + return nil + } + out := new(NutanixFailureDomainReference) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NutanixMachineProviderConfig) DeepCopyInto(out *NutanixMachineProviderConfig) { *out = *in @@ -630,6 +656,11 @@ func (in *NutanixMachineProviderConfig) DeepCopyInto(out *NutanixMachineProvider *out = new(corev1.LocalObjectReference) **out = **in } + if in.FailureDomain != nil { + in, out := &in.FailureDomain, &out.FailureDomain + *out = new(NutanixFailureDomainReference) + **out = **in + } return } @@ -717,7 +748,11 @@ func (in *NutanixResourceIdentifier) DeepCopy() *NutanixResourceIdentifier { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *OpenShiftMachineV1Beta1MachineTemplate) DeepCopyInto(out *OpenShiftMachineV1Beta1MachineTemplate) { *out = *in - in.FailureDomains.DeepCopyInto(&out.FailureDomains) + if in.FailureDomains != nil { + in, out := &in.FailureDomains, &out.FailureDomains + *out = new(FailureDomains) + (*in).DeepCopyInto(*out) + } in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) return @@ -940,3 +975,19 @@ func (in *Tag) DeepCopy() *Tag { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VSphereFailureDomain) DeepCopyInto(out *VSphereFailureDomain) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSphereFailureDomain. +func (in *VSphereFailureDomain) DeepCopy() *VSphereFailureDomain { + if in == nil { + return nil + } + out := new(VSphereFailureDomain) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/machine/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/machine/v1/zz_generated.swagger_doc_generated.go index 6406220e7d..44fed0c1e1 100644 --- a/vendor/github.com/openshift/api/machine/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/machine/v1/zz_generated.swagger_doc_generated.go @@ -244,11 +244,13 @@ func (ControlPlaneMachineSetTemplateObjectMeta) SwaggerDoc() map[string]string { var map_FailureDomains = map[string]string{ "": "FailureDomain represents the different configurations required to spread Machines across failure domains on different platforms.", - "platform": "Platform identifies the platform for which the FailureDomain represents. Currently supported values are AWS, Azure, and GCP.", + "platform": "Platform identifies the platform for which the FailureDomain represents. Currently supported values are AWS, Azure, GCP, OpenStack, VSphere and Nutanix.", "aws": "AWS configures failure domain information for the AWS platform.", "azure": "Azure configures failure domain information for the Azure platform.", "gcp": "GCP configures failure domain information for the GCP platform.", + "vsphere": "vsphere configures failure domain information for the VSphere platform.", "openstack": "OpenStack configures failure domain information for the OpenStack platform.", + "nutanix": "nutanix configures failure domain information for the Nutanix platform.", } func (FailureDomains) SwaggerDoc() map[string]string { @@ -264,6 +266,15 @@ func (GCPFailureDomain) SwaggerDoc() map[string]string { return map_GCPFailureDomain } +var map_NutanixFailureDomainReference = map[string]string{ + "": "NutanixFailureDomainReference refers to the failure domain of the Nutanix platform.", + "name": "name of the failure domain in which the nutanix machine provider will create the VM. Failure domains are defined in a cluster's config.openshift.io/Infrastructure resource.", +} + +func (NutanixFailureDomainReference) SwaggerDoc() map[string]string { + return map_NutanixFailureDomainReference +} + var map_OpenShiftMachineV1Beta1MachineTemplate = map[string]string{ "": "OpenShiftMachineV1Beta1MachineTemplate is a template for the ControlPlaneMachineSet to create Machines from the v1beta1.machine.openshift.io API group.", "failureDomains": "FailureDomains is the list of failure domains (sometimes called availability zones) in which the ControlPlaneMachineSet should balance the Control Plane Machines. 
This will be merged into the ProviderSpec given in the template. This field is optional on platforms that do not require placement information.", @@ -295,6 +306,15 @@ func (RootVolume) SwaggerDoc() map[string]string { return map_RootVolume } +var map_VSphereFailureDomain = map[string]string{ + "": "VSphereFailureDomain configures failure domain information for the vSphere platform", + "name": "name of the failure domain in which the vSphere machine provider will create the VM. Failure domains are defined in a cluster's config.openshift.io/Infrastructure resource. When balancing machines across failure domains, the control plane machine set will inject configuration from the Infrastructure resource into the machine providerSpec to allocate the machine to a failure domain.", +} + +func (VSphereFailureDomain) SwaggerDoc() map[string]string { + return map_VSphereFailureDomain +} + var map_NutanixCategory = map[string]string{ "": "NutanixCategory identifies a pair of prism category key and value", "key": "key is the prism category key name", @@ -320,6 +340,7 @@ var map_NutanixMachineProviderConfig = map[string]string{ "categories": "categories optionally adds one or more prism categories (each with key and value) for the Machine's VM to associate with. All the category key and value pairs specified must already exist in the prism central.", "userDataSecret": "userDataSecret is a local reference to a secret that contains the UserData to apply to the VM", "credentialsSecret": "credentialsSecret is a local reference to a secret that contains the credentials data to access Nutanix PC client", + "failureDomain": "failureDomain refers to the name of the FailureDomain with which this Machine is associated. If this is configured, the Nutanix machine controller will use the prism_central endpoint and credentials defined in the referenced FailureDomain to communicate to the prism_central. It will also verify that the 'cluster' and subnets' configuration in the NutanixMachineProviderConfig is consistent with that in the referenced failureDomain.", } func (NutanixMachineProviderConfig) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/machine/v1alpha1/types_openstack.go b/vendor/github.com/openshift/api/machine/v1alpha1/types_openstack.go index e3dd4d0a02..da5fbc5152 100644 --- a/vendor/github.com/openshift/api/machine/v1alpha1/types_openstack.go +++ b/vendor/github.com/openshift/api/machine/v1alpha1/types_openstack.go @@ -92,6 +92,12 @@ type OpenstackProviderSpec struct { // The volume metadata to boot from RootVolume *RootVolume `json:"rootVolume,omitempty"` + // additionalBlockDevices is a list of specifications for additional block devices to attach to the server instance + // +optional + // +listType=map + // +listMapKey=name + AdditionalBlockDevices []AdditionalBlockDevice `json:"additionalBlockDevices,omitempty"` + // The server group to assign the machine to. ServerGroupID string `json:"serverGroupID,omitempty"` @@ -366,3 +372,68 @@ type RootVolume struct { // Deprecated: deviceType will be silently ignored. There is no replacement. DeprecatedDeviceType string `json:"deviceType,omitempty"` } + +// blockDeviceStorage is the storage type of a block device to create and +// contains additional storage options. +// +union +type BlockDeviceStorage struct { + // type is the type of block device to create. + // This can be either "Volume" or "Local". 
+ // +kubebuilder:validation:Required + // +unionDiscriminator + Type BlockDeviceType `json:"type"` + + // volume contains additional storage options for a volume block device. + // +optional + // +unionMember,optional + Volume *BlockDeviceVolume `json:"volume,omitempty"` +} + +// blockDeviceVolume contains additional storage options for a volume block device. +type BlockDeviceVolume struct { + // type is the Cinder volume type of the volume. + // If omitted, the default Cinder volume type that is configured in the OpenStack cloud + // will be used. + // +optional + Type string `json:"type,omitempty"` + + // availabilityZone is the volume availability zone to create the volume in. + // If omitted, the availability zone of the server will be used. + // The availability zone must NOT contain spaces otherwise it will lead to volume that belongs + // to this availability zone register failure, see kubernetes/cloud-provider-openstack#1379 for + // further information. + // +optional + AvailabilityZone string `json:"availabilityZone,omitempty"` +} + +// additionalBlockDevice is a block device to attach to the server. +type AdditionalBlockDevice struct { + // name of the block device in the context of a machine. + // If the block device is a volume, the Cinder volume will be named + // as a combination of the machine name and this name. + // Also, this name will be used for tagging the block device. + // Information about the block device tag can be obtained from the OpenStack + // metadata API or the config drive. + // +kubebuilder:validation:Required + Name string `json:"name"` + + // sizeGiB is the size of the block device in gibibytes (GiB). + // +kubebuilder:validation:Required + SizeGiB int `json:"sizeGiB"` + + // storage specifies the storage type of the block device and + // additional storage options. + // +kubebuilder:validation:Required + Storage BlockDeviceStorage `json:"storage"` +} + +// BlockDeviceType defines the type of block device to create. +type BlockDeviceType string + +const ( + // LocalBlockDevice is an ephemeral block device attached to the server. + LocalBlockDevice BlockDeviceType = "Local" + + // VolumeBlockDevice is a volume block device attached to the server. + VolumeBlockDevice BlockDeviceType = "Volume" +) diff --git a/vendor/github.com/openshift/api/machine/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/machine/v1alpha1/zz_generated.deepcopy.go index 7210713e38..f61b35ab44 100644 --- a/vendor/github.com/openshift/api/machine/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/machine/v1alpha1/zz_generated.deepcopy.go @@ -10,6 +10,23 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdditionalBlockDevice) DeepCopyInto(out *AdditionalBlockDevice) { + *out = *in + in.Storage.DeepCopyInto(&out.Storage) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdditionalBlockDevice. +func (in *AdditionalBlockDevice) DeepCopy() *AdditionalBlockDevice { + if in == nil { + return nil + } + out := new(AdditionalBlockDevice) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *AddressPair) DeepCopyInto(out *AddressPair) { *out = *in @@ -26,6 +43,43 @@ func (in *AddressPair) DeepCopy() *AddressPair { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BlockDeviceStorage) DeepCopyInto(out *BlockDeviceStorage) { + *out = *in + if in.Volume != nil { + in, out := &in.Volume, &out.Volume + *out = new(BlockDeviceVolume) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BlockDeviceStorage. +func (in *BlockDeviceStorage) DeepCopy() *BlockDeviceStorage { + if in == nil { + return nil + } + out := new(BlockDeviceStorage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BlockDeviceVolume) DeepCopyInto(out *BlockDeviceVolume) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BlockDeviceVolume. +func (in *BlockDeviceVolume) DeepCopy() *BlockDeviceVolume { + if in == nil { + return nil + } + out := new(BlockDeviceVolume) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Filter) DeepCopyInto(out *Filter) { *out = *in @@ -165,6 +219,13 @@ func (in *OpenstackProviderSpec) DeepCopyInto(out *OpenstackProviderSpec) { *out = new(RootVolume) **out = **in } + if in.AdditionalBlockDevices != nil { + in, out := &in.AdditionalBlockDevices, &out.AdditionalBlockDevices + *out = make([]AdditionalBlockDevice, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } diff --git a/vendor/github.com/openshift/api/machine/v1alpha1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/machine/v1alpha1/zz_generated.swagger_doc_generated.go index 3ea9595d26..c8094eb269 100644 --- a/vendor/github.com/openshift/api/machine/v1alpha1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/machine/v1alpha1/zz_generated.swagger_doc_generated.go @@ -11,6 +11,37 @@ package v1alpha1 // Those methods can be generated by using hack/update-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE +var map_AdditionalBlockDevice = map[string]string{ + "": "additionalBlockDevice is a block device to attach to the server.", + "name": "name of the block device in the context of a machine. If the block device is a volume, the Cinder volume will be named as a combination of the machine name and this name. Also, this name will be used for tagging the block device. Information about the block device tag can be obtained from the OpenStack metadata API or the config drive.", + "sizeGiB": "sizeGiB is the size of the block device in gibibytes (GiB).", + "storage": "storage specifies the storage type of the block device and additional storage options.", +} + +func (AdditionalBlockDevice) SwaggerDoc() map[string]string { + return map_AdditionalBlockDevice +} + +var map_BlockDeviceStorage = map[string]string{ + "": "blockDeviceStorage is the storage type of a block device to create and contains additional storage options.", + "type": "type is the type of block device to create. 
This can be either \"Volume\" or \"Local\".", + "volume": "volume contains additional storage options for a volume block device.", +} + +func (BlockDeviceStorage) SwaggerDoc() map[string]string { + return map_BlockDeviceStorage +} + +var map_BlockDeviceVolume = map[string]string{ + "": "blockDeviceVolume contains additional storage options for a volume block device.", + "type": "type is the Cinder volume type of the volume. If omitted, the default Cinder volume type that is configured in the OpenStack cloud will be used.", + "availabilityZone": "availabilityZone is the volume availability zone to create the volume in. If omitted, the availability zone of the server will be used. The availability zone must NOT contain spaces otherwise it will lead to volume that belongs to this availability zone register failure, see kubernetes/cloud-provider-openstack#1379 for further information.", +} + +func (BlockDeviceVolume) SwaggerDoc() map[string]string { + return map_BlockDeviceVolume +} + var map_Filter = map[string]string{ "id": "Deprecated: use NetworkParam.uuid instead. Ignored if NetworkParam.uuid is set.", "name": "name filters networks by name.", @@ -60,28 +91,29 @@ func (NetworkParam) SwaggerDoc() map[string]string { } var map_OpenstackProviderSpec = map[string]string{ - "": "OpenstackProviderSpec is the type that will be embedded in a Machine.Spec.ProviderSpec field for an OpenStack Instance. It is used by the Openstack machine actuator to create a single machine instance. Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", - "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "cloudsSecret": "The name of the secret containing the openstack credentials", - "cloudName": "The name of the cloud to use from the clouds secret", - "flavor": "The flavor reference for the flavor for your server instance.", - "image": "The name of the image to use for your server instance. If the RootVolume is specified, this will be ignored and use rootVolume directly.", - "keyName": "The ssh key to inject in the instance", - "sshUserName": "The machine ssh username", - "networks": "A networks object. Required parameter when there are multiple networks defined for the tenant. When you do not specify the networks parameter, the server attaches to the only network created for the current tenant.", - "ports": "Create and assign additional ports to instances", - "floatingIP": "floatingIP specifies a floating IP to be associated with the machine. Note that it is not safe to use this parameter in a MachineSet, as only one Machine may be assigned the same floating IP.\n\nDeprecated: floatingIP will be removed in a future release as it cannot be implemented correctly.", - "availabilityZone": "The availability zone from which to launch the server.", - "securityGroups": "The names of the security groups to assign to the instance", - "userDataSecret": "The name of the secret containing the user data (startup script in most cases)", - "trunk": "Whether the server instance is created on a trunk port or not.", - "tags": "Machine tags Requires Nova api 2.52 minimum!", - "serverMetadata": "Metadata mapping. 
Allows you to create a map of key value pairs to add to the server instance.", - "configDrive": "Config Drive support", - "rootVolume": "The volume metadata to boot from", - "serverGroupID": "The server group to assign the machine to.", - "serverGroupName": "The server group to assign the machine to. A server group with that name will be created if it does not exist. If both ServerGroupID and ServerGroupName are non-empty, they must refer to the same OpenStack resource.", - "primarySubnet": "The subnet that a set of machines will get ingress/egress traffic from", + "": "OpenstackProviderSpec is the type that will be embedded in a Machine.Spec.ProviderSpec field for an OpenStack Instance. It is used by the Openstack machine actuator to create a single machine instance. Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "cloudsSecret": "The name of the secret containing the openstack credentials", + "cloudName": "The name of the cloud to use from the clouds secret", + "flavor": "The flavor reference for the flavor for your server instance.", + "image": "The name of the image to use for your server instance. If the RootVolume is specified, this will be ignored and use rootVolume directly.", + "keyName": "The ssh key to inject in the instance", + "sshUserName": "The machine ssh username", + "networks": "A networks object. Required parameter when there are multiple networks defined for the tenant. When you do not specify the networks parameter, the server attaches to the only network created for the current tenant.", + "ports": "Create and assign additional ports to instances", + "floatingIP": "floatingIP specifies a floating IP to be associated with the machine. Note that it is not safe to use this parameter in a MachineSet, as only one Machine may be assigned the same floating IP.\n\nDeprecated: floatingIP will be removed in a future release as it cannot be implemented correctly.", + "availabilityZone": "The availability zone from which to launch the server.", + "securityGroups": "The names of the security groups to assign to the instance", + "userDataSecret": "The name of the secret containing the user data (startup script in most cases)", + "trunk": "Whether the server instance is created on a trunk port or not.", + "tags": "Machine tags Requires Nova api 2.52 minimum!", + "serverMetadata": "Metadata mapping. Allows you to create a map of key value pairs to add to the server instance.", + "configDrive": "Config Drive support", + "rootVolume": "The volume metadata to boot from", + "additionalBlockDevices": "additionalBlockDevices is a list of specifications for additional block devices to attach to the server instance", + "serverGroupID": "The server group to assign the machine to.", + "serverGroupName": "The server group to assign the machine to. A server group with that name will be created if it does not exist. 
If both ServerGroupID and ServerGroupName are non-empty, they must refer to the same OpenStack resource.", + "primarySubnet": "The subnet that a set of machines will get ingress/egress traffic from", } func (OpenstackProviderSpec) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/network/.codegen.yaml b/vendor/github.com/openshift/api/network/.codegen.yaml new file mode 100644 index 0000000000..55f3a272c8 --- /dev/null +++ b/vendor/github.com/openshift/api/network/.codegen.yaml @@ -0,0 +1,6 @@ +schemapatch: + requiredFeatureSets: + - "" + - "Default" + - "TechPreviewNoUpgrade" + - "CustomNoUpgrade" diff --git a/vendor/github.com/openshift/api/network/install.go b/vendor/github.com/openshift/api/network/install.go index 85bc706236..fbaa079b3f 100644 --- a/vendor/github.com/openshift/api/network/install.go +++ b/vendor/github.com/openshift/api/network/install.go @@ -5,6 +5,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" networkv1 "github.com/openshift/api/network/v1" + networkv1alpha1 "github.com/openshift/api/network/v1alpha1" ) const ( @@ -12,7 +13,7 @@ const ( ) var ( - schemeBuilder = runtime.NewSchemeBuilder(networkv1.Install) + schemeBuilder = runtime.NewSchemeBuilder(networkv1.Install, networkv1alpha1.Install) // Install is a function which adds every version of this group to a scheme Install = schemeBuilder.AddToScheme ) diff --git a/vendor/github.com/openshift/api/network/v1alpha1/0000_70_dnsnameresolver_00-customnoupgrade.crd.yaml b/vendor/github.com/openshift/api/network/v1alpha1/0000_70_dnsnameresolver_00-customnoupgrade.crd.yaml new file mode 100644 index 0000000000..19ad00b875 --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1alpha1/0000_70_dnsnameresolver_00-customnoupgrade.crd.yaml @@ -0,0 +1,154 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1524 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + release.openshift.io/feature-set: CustomNoUpgrade + name: dnsnameresolvers.network.openshift.io +spec: + group: network.openshift.io + names: + kind: DNSNameResolver + listKind: DNSNameResolverList + plural: dnsnameresolvers + singular: dnsnameresolver + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: "DNSNameResolver stores the DNS name resolution information of a DNS name. It can be enabled by the TechPreviewNoUpgrade feature set. It can also be enabled by the feature gate DNSNameResolver when using CustomNoUpgrade feature set. \n Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec is the specification of the desired behavior of the DNSNameResolver. + properties: + name: + description: name is the DNS name for which the DNS name resolution information will be stored. For a regular DNS name, only the DNS name resolution information of the regular DNS name will be stored. For a wildcard DNS name, the DNS name resolution information of all the DNS names that match the wildcard DNS name will be stored. For a wildcard DNS name, the '*' will match only one label. Additionally, only a single '*' can be used at the beginning of the wildcard DNS name. For example, '*.example.com.' will match 'sub1.example.com.' but won't match 'sub2.sub1.example.com.' + maxLength: 254 + pattern: ^(\*\.)?([a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?\.){2,}$ + type: string + x-kubernetes-validations: + - message: spec.name is immutable + rule: self == oldSelf + required: + - name + type: object + status: + description: status is the most recently observed status of the DNSNameResolver. + properties: + resolvedNames: + description: resolvedNames contains a list of matching DNS names and their corresponding IP addresses along with their TTL and last DNS lookup times. + items: + description: DNSNameResolverResolvedName describes the details of a resolved DNS name. + properties: + conditions: + description: 'conditions provide information about the state of the DNS name. Known .status.conditions.type is: "Degraded". "Degraded" is true when the last resolution failed for the DNS name, and false otherwise.' + items: + description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. 
+ maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + dnsName: + description: dnsName is the resolved DNS name matching the name field of DNSNameResolverSpec. This field can store both regular and wildcard DNS names which match the spec.name field. When the spec.name field contains a regular DNS name, this field will store the same regular DNS name after it is successfully resolved. When the spec.name field contains a wildcard DNS name, each resolvedName.dnsName will store the regular DNS names which match the wildcard DNS name and have been successfully resolved. If the wildcard DNS name can also be successfully resolved, then this field will store the wildcard DNS name as well. + maxLength: 254 + pattern: ^(\*\.)?([a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?\.){2,}$ + type: string + resolutionFailures: + description: resolutionFailures keeps the count of how many consecutive times the DNS resolution failed for the dnsName. If the DNS resolution succeeds then the field will be set to zero. Upon every failure, the value of the field will be incremented by one. The details about the DNS name will be removed, if the value of resolutionFailures reaches 5 and the TTL of all the associated IP addresses have expired. + format: int32 + type: integer + resolvedAddresses: + description: resolvedAddresses gives the list of associated IP addresses and their corresponding TTLs and last lookup times for the dnsName. + items: + description: DNSNameResolverResolvedAddress describes the details of an IP address for a resolved DNS name. + properties: + ip: + anyOf: + - format: ipv4 + - format: ipv6 + description: ip is an IP address associated with the dnsName. The validity of the IP address expires after lastLookupTime + ttlSeconds. To refresh the information, a DNS lookup will be performed upon the expiration of the IP address's validity. If the information is not refreshed then it will be removed with a grace period after the expiration of the IP address's validity. + type: string + lastLookupTime: + description: lastLookupTime is the timestamp when the last DNS lookup was completed successfully. The validity of the IP address expires after lastLookupTime + ttlSeconds. The value of this field will be updated to the current time on a successful DNS lookup. If the information is not refreshed then it will be removed with a grace period after the expiration of the IP address's validity. + format: date-time + type: string + ttlSeconds: + description: ttlSeconds is the time-to-live value of the IP address. The validity of the IP address expires after lastLookupTime + ttlSeconds. 
On a successful DNS lookup the value of this field will be updated with the current time-to-live value. If the information is not refreshed then it will be removed with a grace period after the expiration of the IP address's validity. + format: int32 + type: integer + required: + - ip + - lastLookupTime + - ttlSeconds + type: object + type: array + x-kubernetes-list-map-keys: + - ip + x-kubernetes-list-type: map + required: + - dnsName + - resolvedAddresses + type: object + type: array + x-kubernetes-list-map-keys: + - dnsName + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/network/v1alpha1/0000_70_dnsnameresolver_00-customnoupgrade.crd.yaml-patch b/vendor/github.com/openshift/api/network/v1alpha1/0000_70_dnsnameresolver_00-customnoupgrade.crd.yaml-patch new file mode 100644 index 0000000000..975ae7c93f --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1alpha1/0000_70_dnsnameresolver_00-customnoupgrade.crd.yaml-patch @@ -0,0 +1,5 @@ +- op: add + path: /spec/versions/name=v1alpha1/schema/openAPIV3Schema/properties/status/properties/resolvedNames/items/properties/resolvedAddresses/items/properties/ip/anyOf + value: + - format: ipv4 + - format: ipv6 diff --git a/vendor/github.com/openshift/api/network/v1alpha1/0000_70_dnsnameresolver_00-techpreview.crd.yaml b/vendor/github.com/openshift/api/network/v1alpha1/0000_70_dnsnameresolver_00-techpreview.crd.yaml new file mode 100644 index 0000000000..e4c3c25412 --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1alpha1/0000_70_dnsnameresolver_00-techpreview.crd.yaml @@ -0,0 +1,154 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1524 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + release.openshift.io/feature-set: TechPreviewNoUpgrade + name: dnsnameresolvers.network.openshift.io +spec: + group: network.openshift.io + names: + kind: DNSNameResolver + listKind: DNSNameResolverList + plural: dnsnameresolvers + singular: dnsnameresolver + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: "DNSNameResolver stores the DNS name resolution information of a DNS name. It can be enabled by the TechPreviewNoUpgrade feature set. It can also be enabled by the feature gate DNSNameResolver when using CustomNoUpgrade feature set. \n Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec is the specification of the desired behavior of the DNSNameResolver. + properties: + name: + description: name is the DNS name for which the DNS name resolution information will be stored. For a regular DNS name, only the DNS name resolution information of the regular DNS name will be stored. For a wildcard DNS name, the DNS name resolution information of all the DNS names that match the wildcard DNS name will be stored. For a wildcard DNS name, the '*' will match only one label. Additionally, only a single '*' can be used at the beginning of the wildcard DNS name. For example, '*.example.com.' will match 'sub1.example.com.' but won't match 'sub2.sub1.example.com.' + maxLength: 254 + pattern: ^(\*\.)?([a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?\.){2,}$ + type: string + x-kubernetes-validations: + - message: spec.name is immutable + rule: self == oldSelf + required: + - name + type: object + status: + description: status is the most recently observed status of the DNSNameResolver. + properties: + resolvedNames: + description: resolvedNames contains a list of matching DNS names and their corresponding IP addresses along with their TTL and last DNS lookup times. + items: + description: DNSNameResolverResolvedName describes the details of a resolved DNS name. + properties: + conditions: + description: 'conditions provide information about the state of the DNS name. Known .status.conditions.type is: "Degraded". "Degraded" is true when the last resolution failed for the DNS name, and false otherwise.' + items: + description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. 
+ maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + dnsName: + description: dnsName is the resolved DNS name matching the name field of DNSNameResolverSpec. This field can store both regular and wildcard DNS names which match the spec.name field. When the spec.name field contains a regular DNS name, this field will store the same regular DNS name after it is successfully resolved. When the spec.name field contains a wildcard DNS name, each resolvedName.dnsName will store the regular DNS names which match the wildcard DNS name and have been successfully resolved. If the wildcard DNS name can also be successfully resolved, then this field will store the wildcard DNS name as well. + maxLength: 254 + pattern: ^(\*\.)?([a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?\.){2,}$ + type: string + resolutionFailures: + description: resolutionFailures keeps the count of how many consecutive times the DNS resolution failed for the dnsName. If the DNS resolution succeeds then the field will be set to zero. Upon every failure, the value of the field will be incremented by one. The details about the DNS name will be removed, if the value of resolutionFailures reaches 5 and the TTL of all the associated IP addresses have expired. + format: int32 + type: integer + resolvedAddresses: + description: resolvedAddresses gives the list of associated IP addresses and their corresponding TTLs and last lookup times for the dnsName. + items: + description: DNSNameResolverResolvedAddress describes the details of an IP address for a resolved DNS name. + properties: + ip: + anyOf: + - format: ipv4 + - format: ipv6 + description: ip is an IP address associated with the dnsName. The validity of the IP address expires after lastLookupTime + ttlSeconds. To refresh the information, a DNS lookup will be performed upon the expiration of the IP address's validity. If the information is not refreshed then it will be removed with a grace period after the expiration of the IP address's validity. + type: string + lastLookupTime: + description: lastLookupTime is the timestamp when the last DNS lookup was completed successfully. The validity of the IP address expires after lastLookupTime + ttlSeconds. The value of this field will be updated to the current time on a successful DNS lookup. If the information is not refreshed then it will be removed with a grace period after the expiration of the IP address's validity. + format: date-time + type: string + ttlSeconds: + description: ttlSeconds is the time-to-live value of the IP address. The validity of the IP address expires after lastLookupTime + ttlSeconds. 
On a successful DNS lookup the value of this field will be updated with the current time-to-live value. If the information is not refreshed then it will be removed with a grace period after the expiration of the IP address's validity. + format: int32 + type: integer + required: + - ip + - lastLookupTime + - ttlSeconds + type: object + type: array + x-kubernetes-list-map-keys: + - ip + x-kubernetes-list-type: map + required: + - dnsName + - resolvedAddresses + type: object + type: array + x-kubernetes-list-map-keys: + - dnsName + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/network/v1alpha1/0000_70_dnsnameresolver_00-techpreview.crd.yaml-patch b/vendor/github.com/openshift/api/network/v1alpha1/0000_70_dnsnameresolver_00-techpreview.crd.yaml-patch new file mode 100644 index 0000000000..975ae7c93f --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1alpha1/0000_70_dnsnameresolver_00-techpreview.crd.yaml-patch @@ -0,0 +1,5 @@ +- op: add + path: /spec/versions/name=v1alpha1/schema/openAPIV3Schema/properties/status/properties/resolvedNames/items/properties/resolvedAddresses/items/properties/ip/anyOf + value: + - format: ipv4 + - format: ipv6 diff --git a/vendor/github.com/openshift/api/network/v1alpha1/Makefile b/vendor/github.com/openshift/api/network/v1alpha1/Makefile new file mode 100644 index 0000000000..376fee2dc0 --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1alpha1/Makefile @@ -0,0 +1,3 @@ +.PHONY: test +test: + make -C ../../tests test GINKGO_EXTRA_ARGS=--focus="network.openshift.io/v1alpha1" diff --git a/vendor/github.com/openshift/api/network/v1alpha1/custom.dnsnameresolver.testsuite.yaml b/vendor/github.com/openshift/api/network/v1alpha1/custom.dnsnameresolver.testsuite.yaml new file mode 100644 index 0000000000..24175b6d73 --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1alpha1/custom.dnsnameresolver.testsuite.yaml @@ -0,0 +1,402 @@ +apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: "[CustomNoUpgrade] DNSNameResolver" +crd: 0000_70_dnsnameresolver_00-techpreview.crd.yaml +tests: + onCreate: + - name: Should be able to create a minimal DNSNameResolver with a regular DNS name + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + spec: + name: www.example.com. + expected: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + spec: + name: www.example.com. + - name: Should be able to create a minimal DNSNameResolver with a wildcard DNS name + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + spec: + name: "*.example.com." + expected: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + spec: + name: "*.example.com." + - name: Should be able to specify DNS name with a '-' in a label + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + spec: + name: www.example-domain.com. + expected: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + spec: + name: www.example-domain.com. 
+ - name: Should not be able to specify invalid DNS name + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www_example_com + expectedError: "DNSNameResolver.network.openshift.io \"example\" is invalid: spec.name: Invalid value: \"www_example_com\": spec.name in body should match '^(\\*\\.)?([a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?\\.){2,}$'" + - name: Should not be able to specify DNS name with a label starting with '-' + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: -example.com. + expectedError: "DNSNameResolver.network.openshift.io \"example\" is invalid: spec.name: Invalid value: \"-example.com.\": spec.name in body should match '^(\\*\\.)?([a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?\\.){2,}$'" + - name: Should not be able to specify DNS name with a label ending with '-' + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: example-.com. + expectedError: "DNSNameResolver.network.openshift.io \"example\" is invalid: spec.name: Invalid value: \"example-.com.\": spec.name in body should match '^(\\*\\.)?([a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?\\.){2,}$'" + - name: Should not be able to specify DNS name without a trailing period + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com + expectedError: "DNSNameResolver.network.openshift.io \"example\" is invalid: spec.name: Invalid value: \"www.example.com\": spec.name in body should match '^(\\*\\.)?([a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?\\.){2,}$'" + - name: Should not be able to specify just the TLD in a DNS name + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: com. + expectedError: "DNSNameResolver.network.openshift.io \"example\" is invalid: spec.name: Invalid value: \"com.\": spec.name in body should match '^(\\*\\.)?([a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?\\.){2,}$'" + - name: Should not be able to specify a wildcard before TLD in DNS name + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: "*.com." + expectedError: "DNSNameResolver.network.openshift.io \"example\" is invalid: spec.name: Invalid value: \"*.com.\": spec.name in body should match '^(\\*\\.)?([a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?\\.){2,}$'" + - name: Should not be able to specify a DNS name with a label containing uppercase letters + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: ABCD.com. + expectedError: "DNSNameResolver.network.openshift.io \"example\" is invalid: spec.name: Invalid value: \"ABCD.com.\": spec.name in body should match '^(\\*\\.)?([a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?\\.){2,}$'" + - name: Should not be able to specify a DNS name with a label containing more than 63 characters + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz123456789012.com. 
+ expectedError: "DNSNameResolver.network.openshift.io \"example\" is invalid: spec.name: Invalid value: \"abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz123456789012.com.\": spec.name in body should match '^(\\*\\.)?([a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?\\.){2,}$'" + - name: Should be able to specify a DNS name with a label containing 63 characters + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz12345678901.com. + expected: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + spec: + name: abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz12345678901.com. + onUpdate: + - name: Should not be able to update spec.name field + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + updated: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.newexample.com. + expectedError: "DNSNameResolver.network.openshift.io \"example\" is invalid: spec.name: Invalid value: \"string\": spec.name is immutable" + - name: Should be able to add valid IPv4 address + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + updated: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + status: + resolvedNames: + - dnsName: www.example.com. + resolvedAddresses: + - ip: "192.168.1.1" + ttlSeconds: 60 + lastLookupTime: "2023-08-08T15:07:04Z" + expected: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + status: + resolvedNames: + - dnsName: www.example.com. + resolvedAddresses: + - ip: "192.168.1.1" + ttlSeconds: 60 + lastLookupTime: "2023-08-08T15:07:04Z" + - name: Should be able to add lowest valid IPv4 address + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + updated: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + status: + resolvedNames: + - dnsName: www.example.com. + resolvedAddresses: + - ip: "0.0.0.0" + ttlSeconds: 60 + lastLookupTime: "2023-08-08T15:07:04Z" + expected: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + status: + resolvedNames: + - dnsName: www.example.com. + resolvedAddresses: + - ip: "0.0.0.0" + ttlSeconds: 60 + lastLookupTime: "2023-08-08T15:07:04Z" + - name: Should be able to add highest valid IPv4 address + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + updated: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + status: + resolvedNames: + - dnsName: www.example.com. + resolvedAddresses: + - ip: "255.255.255.255" + ttlSeconds: 60 + lastLookupTime: "2023-08-08T15:07:04Z" + expected: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + status: + resolvedNames: + - dnsName: www.example.com. 
+ resolvedAddresses: + - ip: "255.255.255.255" + ttlSeconds: 60 + lastLookupTime: "2023-08-08T15:07:04Z" + - name: Should not be able to add invalid IPv4 address + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + updated: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + status: + resolvedNames: + - dnsName: www.example.com. + resolvedAddresses: + - ip: "256.256.256.256" + ttlSeconds: 60 + lastLookupTime: "2023-08-08T15:07:04Z" + expectedStatusError: "DNSNameResolver.network.openshift.io \"example\" is invalid: [: Invalid value: \"\": \"status.resolvedNames[0].resolvedAddresses[0].ip\" must validate at least one schema (anyOf), status.resolvedNames[0].resolvedAddresses[0].ip: Invalid value: \"256.256.256.256\": status.resolvedNames[0].resolvedAddresses[0].ip in body must be of type ipv4: \"256.256.256.256\", : Invalid value: \"null\": some validation rules were not checked because the object was invalid; correct the existing errors to complete validation]" + - name: Should be able to add valid IPv6 address + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + updated: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + status: + resolvedNames: + - dnsName: www.example.com. + resolvedAddresses: + - ip: "2001:db8:3333:4444:5555:6666:7777:8888" + ttlSeconds: 60 + lastLookupTime: "2023-08-08T15:07:04Z" + expected: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + status: + resolvedNames: + - dnsName: www.example.com. + resolvedAddresses: + - ip: "2001:db8:3333:4444:5555:6666:7777:8888" + ttlSeconds: 60 + lastLookupTime: "2023-08-08T15:07:04Z" + - name: Should be able to add lowest valid IPv6 address + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + updated: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + status: + resolvedNames: + - dnsName: www.example.com. + resolvedAddresses: + - ip: "::" + ttlSeconds: 60 + lastLookupTime: "2023-08-08T15:07:04Z" + expected: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + status: + resolvedNames: + - dnsName: www.example.com. + resolvedAddresses: + - ip: "::" + ttlSeconds: 60 + lastLookupTime: "2023-08-08T15:07:04Z" + - name: Should be able to add highest valid IPv6 address + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + updated: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + status: + resolvedNames: + - dnsName: www.example.com. + resolvedAddresses: + - ip: "FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF" + ttlSeconds: 60 + lastLookupTime: "2023-08-08T15:07:04Z" + expected: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + status: + resolvedNames: + - dnsName: www.example.com. 
+ resolvedAddresses: + - ip: "FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF" + ttlSeconds: 60 + lastLookupTime: "2023-08-08T15:07:04Z" + - name: Should not be able to add invalid IPv6 address + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + updated: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + status: + resolvedNames: + - dnsName: www.example.com. + resolvedAddresses: + - ip: "10000:10000:10000:10000:10000:10000:10000:10000" + ttlSeconds: 60 + lastLookupTime: "2023-08-08T15:07:04Z" + expectedStatusError: "DNSNameResolver.network.openshift.io \"example\" is invalid: [: Invalid value: \"\": \"status.resolvedNames[0].resolvedAddresses[0].ip\" must validate at least one schema (anyOf), status.resolvedNames[0].resolvedAddresses[0].ip: Invalid value: \"10000:10000:10000:10000:10000:10000:10000:10000\": status.resolvedNames[0].resolvedAddresses[0].ip in body must be of type ipv4: \"10000:10000:10000:10000:10000:10000:10000:10000\", : Invalid value: \"null\": some validation rules were not checked because the object was invalid; correct the existing errors to complete validation]" diff --git a/vendor/github.com/openshift/api/network/v1alpha1/doc.go b/vendor/github.com/openshift/api/network/v1alpha1/doc.go new file mode 100644 index 0000000000..35539c458c --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1alpha1/doc.go @@ -0,0 +1,6 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +groupName=network.openshift.io +package v1alpha1 diff --git a/vendor/github.com/openshift/api/network/v1alpha1/register.go b/vendor/github.com/openshift/api/network/v1alpha1/register.go new file mode 100644 index 0000000000..6d80c234ba --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1alpha1/register.go @@ -0,0 +1,40 @@ +package v1alpha1 + +import ( + configv1 "github.com/openshift/api/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "network.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, configv1.Install) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +func addKnownTypes(scheme *runtime.Scheme) error { + metav1.AddToGroupVersion(scheme, GroupVersion) + + scheme.AddKnownTypes(GroupVersion, + &DNSNameResolver{}, + &DNSNameResolverList{}, + ) + + return nil +} diff --git a/vendor/github.com/openshift/api/network/v1alpha1/techpreview.dnsnameresolver.testsuite.yaml b/vendor/github.com/openshift/api/network/v1alpha1/techpreview.dnsnameresolver.testsuite.yaml new file mode 100644 index 0000000000..411e5ffcdc --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1alpha1/techpreview.dnsnameresolver.testsuite.yaml @@ -0,0 
+1,402 @@ +apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: "[TechPreviewNoUpgrade] DNSNameResolver" +crd: 0000_70_dnsnameresolver_00-techpreview.crd.yaml +tests: + onCreate: + - name: Should be able to create a minimal DNSNameResolver + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + spec: + name: www.example.com. + expected: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + spec: + name: www.example.com. + - name: Should be able to create a minimal DNSNameResolver with a wildcard DNS name + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + spec: + name: "*.example.com." + expected: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + spec: + name: "*.example.com." + - name: Should be able to specify DNS name with a '-' in a label + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + spec: + name: www.example-domain.com. + expected: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + spec: + name: www.example-domain.com. + - name: Should not be able to specify invalid DNS name + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www_example_com + expectedError: "DNSNameResolver.network.openshift.io \"example\" is invalid: spec.name: Invalid value: \"www_example_com\": spec.name in body should match '^(\\*\\.)?([a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?\\.){2,}$'" + - name: Should not be able to specify DNS name with a label starting with '-' + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: -example.com. + expectedError: "DNSNameResolver.network.openshift.io \"example\" is invalid: spec.name: Invalid value: \"-example.com.\": spec.name in body should match '^(\\*\\.)?([a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?\\.){2,}$'" + - name: Should not be able to specify DNS name with a label ending with '-' + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: example-.com. + expectedError: "DNSNameResolver.network.openshift.io \"example\" is invalid: spec.name: Invalid value: \"example-.com.\": spec.name in body should match '^(\\*\\.)?([a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?\\.){2,}$'" + - name: Should not be able to specify DNS name without a trailing period + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com + expectedError: "DNSNameResolver.network.openshift.io \"example\" is invalid: spec.name: Invalid value: \"www.example.com\": spec.name in body should match '^(\\*\\.)?([a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?\\.){2,}$'" + - name: Should not be able to specify just the TLD in a DNS name + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: com. + expectedError: "DNSNameResolver.network.openshift.io \"example\" is invalid: spec.name: Invalid value: \"com.\": spec.name in body should match '^(\\*\\.)?([a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?\\.){2,}$'" + - name: Should not be able to specify a wildcard before TLD in DNS name + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: "*.com." 
+ expectedError: "DNSNameResolver.network.openshift.io \"example\" is invalid: spec.name: Invalid value: \"*.com.\": spec.name in body should match '^(\\*\\.)?([a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?\\.){2,}$'" + - name: Should not be able to specify a DNS name with a label containing uppercase letters + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: ABCD.com. + expectedError: "DNSNameResolver.network.openshift.io \"example\" is invalid: spec.name: Invalid value: \"ABCD.com.\": spec.name in body should match '^(\\*\\.)?([a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?\\.){2,}$'" + - name: Should not be able to specify a DNS name with a label containing more than 63 characters + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz123456789012.com. + expectedError: "DNSNameResolver.network.openshift.io \"example\" is invalid: spec.name: Invalid value: \"abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz123456789012.com.\": spec.name in body should match '^(\\*\\.)?([a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?\\.){2,}$'" + - name: Should be able to specify a DNS name with a label containing 63 characters + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz12345678901.com. + expected: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + spec: + name: abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz12345678901.com. + onUpdate: + - name: Should not be able to update spec.name field + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + updated: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.newexample.com. + expectedError: "DNSNameResolver.network.openshift.io \"example\" is invalid: spec.name: Invalid value: \"string\": spec.name is immutable" + - name: Should be able to add valid IPv4 address + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + updated: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + status: + resolvedNames: + - dnsName: www.example.com. + resolvedAddresses: + - ip: "192.168.1.1" + ttlSeconds: 60 + lastLookupTime: "2023-08-08T15:07:04Z" + expected: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + status: + resolvedNames: + - dnsName: www.example.com. + resolvedAddresses: + - ip: "192.168.1.1" + ttlSeconds: 60 + lastLookupTime: "2023-08-08T15:07:04Z" + - name: Should be able to add lowest valid IPv4 address + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + updated: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + status: + resolvedNames: + - dnsName: www.example.com. 
+ resolvedAddresses: + - ip: "0.0.0.0" + ttlSeconds: 60 + lastLookupTime: "2023-08-08T15:07:04Z" + expected: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + status: + resolvedNames: + - dnsName: www.example.com. + resolvedAddresses: + - ip: "0.0.0.0" + ttlSeconds: 60 + lastLookupTime: "2023-08-08T15:07:04Z" + - name: Should be able to add highest valid IPv4 address + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + updated: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + status: + resolvedNames: + - dnsName: www.example.com. + resolvedAddresses: + - ip: "255.255.255.255" + ttlSeconds: 60 + lastLookupTime: "2023-08-08T15:07:04Z" + expected: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + status: + resolvedNames: + - dnsName: www.example.com. + resolvedAddresses: + - ip: "255.255.255.255" + ttlSeconds: 60 + lastLookupTime: "2023-08-08T15:07:04Z" + - name: Should not be able to add invalid IPv4 address + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + updated: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + status: + resolvedNames: + - dnsName: www.example.com. + resolvedAddresses: + - ip: "256.256.256.256" + ttlSeconds: 60 + lastLookupTime: "2023-08-08T15:07:04Z" + expectedStatusError: "DNSNameResolver.network.openshift.io \"example\" is invalid: [: Invalid value: \"\": \"status.resolvedNames[0].resolvedAddresses[0].ip\" must validate at least one schema (anyOf), status.resolvedNames[0].resolvedAddresses[0].ip: Invalid value: \"256.256.256.256\": status.resolvedNames[0].resolvedAddresses[0].ip in body must be of type ipv4: \"256.256.256.256\", : Invalid value: \"null\": some validation rules were not checked because the object was invalid; correct the existing errors to complete validation]" + - name: Should be able to add valid IPv6 address + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + updated: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + status: + resolvedNames: + - dnsName: www.example.com. + resolvedAddresses: + - ip: "2001:db8:3333:4444:5555:6666:7777:8888" + ttlSeconds: 60 + lastLookupTime: "2023-08-08T15:07:04Z" + expected: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + status: + resolvedNames: + - dnsName: www.example.com. + resolvedAddresses: + - ip: "2001:db8:3333:4444:5555:6666:7777:8888" + ttlSeconds: 60 + lastLookupTime: "2023-08-08T15:07:04Z" + - name: Should be able to add lowest valid IPv6 address + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + updated: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + status: + resolvedNames: + - dnsName: www.example.com. 
+ resolvedAddresses: + - ip: "::" + ttlSeconds: 60 + lastLookupTime: "2023-08-08T15:07:04Z" + expected: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + status: + resolvedNames: + - dnsName: www.example.com. + resolvedAddresses: + - ip: "::" + ttlSeconds: 60 + lastLookupTime: "2023-08-08T15:07:04Z" + - name: Should be able to add highest valid IPv6 address + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + updated: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + status: + resolvedNames: + - dnsName: www.example.com. + resolvedAddresses: + - ip: "FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF" + ttlSeconds: 60 + lastLookupTime: "2023-08-08T15:07:04Z" + expected: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + status: + resolvedNames: + - dnsName: www.example.com. + resolvedAddresses: + - ip: "FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF" + ttlSeconds: 60 + lastLookupTime: "2023-08-08T15:07:04Z" + - name: Should not be able to add invalid IPv6 address + initial: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + updated: | + apiVersion: network.openshift.io/v1alpha1 + kind: DNSNameResolver + metadata: + name: example + spec: + name: www.example.com. + status: + resolvedNames: + - dnsName: www.example.com. + resolvedAddresses: + - ip: "10000:10000:10000:10000:10000:10000:10000:10000" + ttlSeconds: 60 + lastLookupTime: "2023-08-08T15:07:04Z" + expectedStatusError: "DNSNameResolver.network.openshift.io \"example\" is invalid: [: Invalid value: \"\": \"status.resolvedNames[0].resolvedAddresses[0].ip\" must validate at least one schema (anyOf), status.resolvedNames[0].resolvedAddresses[0].ip: Invalid value: \"10000:10000:10000:10000:10000:10000:10000:10000\": status.resolvedNames[0].resolvedAddresses[0].ip in body must be of type ipv4: \"10000:10000:10000:10000:10000:10000:10000:10000\", : Invalid value: \"null\": some validation rules were not checked because the object was invalid; correct the existing errors to complete validation]" diff --git a/vendor/github.com/openshift/api/network/v1alpha1/types_dnsnameresolver.go b/vendor/github.com/openshift/api/network/v1alpha1/types_dnsnameresolver.go new file mode 100644 index 0000000000..4e0199d7e7 --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1alpha1/types_dnsnameresolver.go @@ -0,0 +1,139 @@ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +openshift:compatibility-gen:level=4 + +// DNSNameResolver stores the DNS name resolution information of a DNS name. It can be enabled by the TechPreviewNoUpgrade feature set. +// It can also be enabled by the feature gate DNSNameResolver when using CustomNoUpgrade feature set. +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +type DNSNameResolver struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec is the specification of the desired behavior of the DNSNameResolver. + // +kubebuilder:validation:Required + Spec DNSNameResolverSpec `json:"spec"` + // status is the most recently observed status of the DNSNameResolver. + // +optional + Status DNSNameResolverStatus `json:"status,omitempty"` +} + +// DNSName is used for validation of a DNS name. +// +kubebuilder:validation:Pattern=`^(\*\.)?([a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?\.){2,}$` +// +kubebuilder:validation:MaxLength=254 +type DNSName string + +// DNSNameResolverSpec is a desired state description of DNSNameResolver. +type DNSNameResolverSpec struct { + // name is the DNS name for which the DNS name resolution information will be stored. + // For a regular DNS name, only the DNS name resolution information of the regular DNS + // name will be stored. For a wildcard DNS name, the DNS name resolution information + // of all the DNS names that match the wildcard DNS name will be stored. + // For a wildcard DNS name, the '*' will match only one label. Additionally, only a single + // '*' can be used at the beginning of the wildcard DNS name. For example, '*.example.com.' + // will match 'sub1.example.com.' but won't match 'sub2.sub1.example.com.' + // +kubebuilder:validation:Required + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="spec.name is immutable" + Name DNSName `json:"name"` +} + +// DNSNameResolverStatus defines the observed status of DNSNameResolver. +type DNSNameResolverStatus struct { + // resolvedNames contains a list of matching DNS names and their corresponding IP addresses + // along with their TTL and last DNS lookup times. + // +listType=map + // +listMapKey=dnsName + // +patchMergeKey=dnsName + // +patchStrategy=merge + // +optional + ResolvedNames []DNSNameResolverResolvedName `json:"resolvedNames,omitempty" patchStrategy:"merge" patchMergeKey:"dnsName"` +} + +// DNSNameResolverResolvedName describes the details of a resolved DNS name. +type DNSNameResolverResolvedName struct { + // conditions provide information about the state of the DNS name. + // Known .status.conditions.type is: "Degraded". + // "Degraded" is true when the last resolution failed for the DNS name, + // and false otherwise. + // +optional + // +listType=map + // +listMapKey=type + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // dnsName is the resolved DNS name matching the name field of DNSNameResolverSpec. This field can + // store both regular and wildcard DNS names which match the spec.name field. When the spec.name + // field contains a regular DNS name, this field will store the same regular DNS name after it is + // successfully resolved. When the spec.name field contains a wildcard DNS name, each resolvedName.dnsName + // will store the regular DNS names which match the wildcard DNS name and have been successfully resolved. + // If the wildcard DNS name can also be successfully resolved, then this field will store the wildcard + // DNS name as well. + // +kubebuilder:validation:Required + DNSName DNSName `json:"dnsName"` + + // resolvedAddresses gives the list of associated IP addresses and their corresponding TTLs and last + // lookup times for the dnsName. 
+ // +kubebuilder:validation:Required + // +listType=map + // +listMapKey=ip + ResolvedAddresses []DNSNameResolverResolvedAddress `json:"resolvedAddresses"` + + // resolutionFailures keeps the count of how many consecutive times the DNS resolution failed + // for the dnsName. If the DNS resolution succeeds then the field will be set to zero. Upon + // every failure, the value of the field will be incremented by one. The details about the DNS + // name will be removed, if the value of resolutionFailures reaches 5 and the TTL of all the + // associated IP addresses have expired. + ResolutionFailures int32 `json:"resolutionFailures,omitempty"` +} + +// DNSNameResolverResolvedAddress describes the details of an IP address for a resolved DNS name. +type DNSNameResolverResolvedAddress struct { + // ip is an IP address associated with the dnsName. The validity of the IP address expires after + // lastLookupTime + ttlSeconds. To refresh the information, a DNS lookup will be performed upon + // the expiration of the IP address's validity. If the information is not refreshed then it will + // be removed with a grace period after the expiration of the IP address's validity. + // +kubebuilder:validation:Required + IP string `json:"ip"` + + // ttlSeconds is the time-to-live value of the IP address. The validity of the IP address expires after + // lastLookupTime + ttlSeconds. On a successful DNS lookup the value of this field will be updated with + // the current time-to-live value. If the information is not refreshed then it will be removed with a + // grace period after the expiration of the IP address's validity. + // +kubebuilder:validation:Required + TTLSeconds int32 `json:"ttlSeconds"` + + // lastLookupTime is the timestamp when the last DNS lookup was completed successfully. The validity of + // the IP address expires after lastLookupTime + ttlSeconds. The value of this field will be updated to + // the current time on a successful DNS lookup. If the information is not refreshed then it will be + // removed with a grace period after the expiration of the IP address's validity. + // +kubebuilder:validation:Required + LastLookupTime *metav1.Time `json:"lastLookupTime"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +openshift:compatibility-gen:level=4 + +// DNSNameResolverList contains a list of DNSNameResolvers. +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +type DNSNameResolverList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty"` + + // items gives the list of DNSNameResolvers. + Items []DNSNameResolver `json:"items"` +} diff --git a/vendor/github.com/openshift/api/network/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/network/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..b8308c3f83 --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,161 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSNameResolver) DeepCopyInto(out *DNSNameResolver) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSNameResolver. +func (in *DNSNameResolver) DeepCopy() *DNSNameResolver { + if in == nil { + return nil + } + out := new(DNSNameResolver) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DNSNameResolver) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSNameResolverList) DeepCopyInto(out *DNSNameResolverList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DNSNameResolver, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSNameResolverList. +func (in *DNSNameResolverList) DeepCopy() *DNSNameResolverList { + if in == nil { + return nil + } + out := new(DNSNameResolverList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DNSNameResolverList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSNameResolverResolvedAddress) DeepCopyInto(out *DNSNameResolverResolvedAddress) { + *out = *in + if in.LastLookupTime != nil { + in, out := &in.LastLookupTime, &out.LastLookupTime + *out = (*in).DeepCopy() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSNameResolverResolvedAddress. +func (in *DNSNameResolverResolvedAddress) DeepCopy() *DNSNameResolverResolvedAddress { + if in == nil { + return nil + } + out := new(DNSNameResolverResolvedAddress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSNameResolverResolvedName) DeepCopyInto(out *DNSNameResolverResolvedName) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResolvedAddresses != nil { + in, out := &in.ResolvedAddresses, &out.ResolvedAddresses + *out = make([]DNSNameResolverResolvedAddress, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSNameResolverResolvedName. 
+func (in *DNSNameResolverResolvedName) DeepCopy() *DNSNameResolverResolvedName { + if in == nil { + return nil + } + out := new(DNSNameResolverResolvedName) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSNameResolverSpec) DeepCopyInto(out *DNSNameResolverSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSNameResolverSpec. +func (in *DNSNameResolverSpec) DeepCopy() *DNSNameResolverSpec { + if in == nil { + return nil + } + out := new(DNSNameResolverSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSNameResolverStatus) DeepCopyInto(out *DNSNameResolverStatus) { + *out = *in + if in.ResolvedNames != nil { + in, out := &in.ResolvedNames, &out.ResolvedNames + *out = make([]DNSNameResolverResolvedName, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSNameResolverStatus. +func (in *DNSNameResolverStatus) DeepCopy() *DNSNameResolverStatus { + if in == nil { + return nil + } + out := new(DNSNameResolverStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/network/v1alpha1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/network/v1alpha1/zz_generated.swagger_doc_generated.go new file mode 100644 index 0000000000..e5018a9736 --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1alpha1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,76 @@ +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_DNSNameResolver = map[string]string{ + "": "DNSNameResolver stores the DNS name resolution information of a DNS name. It can be enabled by the TechPreviewNoUpgrade feature set. It can also be enabled by the feature gate DNSNameResolver when using CustomNoUpgrade feature set.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the specification of the desired behavior of the DNSNameResolver.", + "status": "status is the most recently observed status of the DNSNameResolver.", +} + +func (DNSNameResolver) SwaggerDoc() map[string]string { + return map_DNSNameResolver +} + +var map_DNSNameResolverList = map[string]string{ + "": "DNSNameResolverList contains a list of DNSNameResolvers.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. 
These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items gives the list of DNSNameResolvers.", +} + +func (DNSNameResolverList) SwaggerDoc() map[string]string { + return map_DNSNameResolverList +} + +var map_DNSNameResolverResolvedAddress = map[string]string{ + "": "DNSNameResolverResolvedAddress describes the details of an IP address for a resolved DNS name.", + "ip": "ip is an IP address associated with the dnsName. The validity of the IP address expires after lastLookupTime + ttlSeconds. To refresh the information, a DNS lookup will be performed upon the expiration of the IP address's validity. If the information is not refreshed then it will be removed with a grace period after the expiration of the IP address's validity.", + "ttlSeconds": "ttlSeconds is the time-to-live value of the IP address. The validity of the IP address expires after lastLookupTime + ttlSeconds. On a successful DNS lookup the value of this field will be updated with the current time-to-live value. If the information is not refreshed then it will be removed with a grace period after the expiration of the IP address's validity.", + "lastLookupTime": "lastLookupTime is the timestamp when the last DNS lookup was completed successfully. The validity of the IP address expires after lastLookupTime + ttlSeconds. The value of this field will be updated to the current time on a successful DNS lookup. If the information is not refreshed then it will be removed with a grace period after the expiration of the IP address's validity.", +} + +func (DNSNameResolverResolvedAddress) SwaggerDoc() map[string]string { + return map_DNSNameResolverResolvedAddress +} + +var map_DNSNameResolverResolvedName = map[string]string{ + "": "DNSNameResolverResolvedName describes the details of a resolved DNS name.", + "conditions": "conditions provide information about the state of the DNS name. Known .status.conditions.type is: \"Degraded\". \"Degraded\" is true when the last resolution failed for the DNS name, and false otherwise.", + "dnsName": "dnsName is the resolved DNS name matching the name field of DNSNameResolverSpec. This field can store both regular and wildcard DNS names which match the spec.name field. When the spec.name field contains a regular DNS name, this field will store the same regular DNS name after it is successfully resolved. When the spec.name field contains a wildcard DNS name, each resolvedName.dnsName will store the regular DNS names which match the wildcard DNS name and have been successfully resolved. If the wildcard DNS name can also be successfully resolved, then this field will store the wildcard DNS name as well.", + "resolvedAddresses": "resolvedAddresses gives the list of associated IP addresses and their corresponding TTLs and last lookup times for the dnsName.", + "resolutionFailures": "resolutionFailures keeps the count of how many consecutive times the DNS resolution failed for the dnsName. If the DNS resolution succeeds then the field will be set to zero. Upon every failure, the value of the field will be incremented by one. 
The details about the DNS name will be removed, if the value of resolutionFailures reaches 5 and the TTL of all the associated IP addresses have expired.", +} + +func (DNSNameResolverResolvedName) SwaggerDoc() map[string]string { + return map_DNSNameResolverResolvedName +} + +var map_DNSNameResolverSpec = map[string]string{ + "": "DNSNameResolverSpec is a desired state description of DNSNameResolver.", + "name": "name is the DNS name for which the DNS name resolution information will be stored. For a regular DNS name, only the DNS name resolution information of the regular DNS name will be stored. For a wildcard DNS name, the DNS name resolution information of all the DNS names that match the wildcard DNS name will be stored. For a wildcard DNS name, the '*' will match only one label. Additionally, only a single '*' can be used at the beginning of the wildcard DNS name. For example, '*.example.com.' will match 'sub1.example.com.' but won't match 'sub2.sub1.example.com.'", +} + +func (DNSNameResolverSpec) SwaggerDoc() map[string]string { + return map_DNSNameResolverSpec +} + +var map_DNSNameResolverStatus = map[string]string{ + "": "DNSNameResolverStatus defines the observed status of DNSNameResolver.", + "resolvedNames": "resolvedNames contains a list of matching DNS names and their corresponding IP addresses along with their TTL and last DNS lookup times.", +} + +func (DNSNameResolverStatus) SwaggerDoc() map[string]string { + return map_DNSNameResolverStatus +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/openshiftcontrolplane/v1/types.go b/vendor/github.com/openshift/api/openshiftcontrolplane/v1/types.go index 05f0d795de..be364a5e37 100644 --- a/vendor/github.com/openshift/api/openshiftcontrolplane/v1/types.go +++ b/vendor/github.com/openshift/api/openshiftcontrolplane/v1/types.go @@ -200,19 +200,21 @@ const ( OpenShiftServiceAccountController OpenShiftControllerName = "openshift.io/serviceaccount" OpenShiftDefaultRoleBindingsController OpenShiftControllerName = "openshift.io/default-rolebindings" OpenShiftServiceAccountPullSecretsController OpenShiftControllerName = "openshift.io/serviceaccount-pull-secrets" - OpenshiftOriginNamespaceController OpenShiftControllerName = "openshift.io/origin-namespace" - OpenshiftBuildController OpenShiftControllerName = "openshift.io/build" - OpenshiftBuildConfigChangeController OpenShiftControllerName = "openshift.io/build-config-change" + OpenShiftOriginNamespaceController OpenShiftControllerName = "openshift.io/origin-namespace" + OpenShiftBuildController OpenShiftControllerName = "openshift.io/build" + OpenShiftBuildConfigChangeController OpenShiftControllerName = "openshift.io/build-config-change" OpenShiftBuilderServiceAccountController OpenShiftControllerName = "openshift.io/builder-serviceaccount" - OpenshiftDeployerController OpenShiftControllerName = "openshift.io/deployer" + OpenShiftDeployerController OpenShiftControllerName = "openshift.io/deployer" OpenShiftDeployerServiceAccountController OpenShiftControllerName = "openshift.io/deployer-serviceaccount" - OpenshiftDeploymentConfigController OpenShiftControllerName = "openshift.io/deploymentconfig" - OpenshiftImageTriggerController OpenShiftControllerName = "openshift.io/image-trigger" - OpenshiftImageImportController OpenShiftControllerName = "openshift.io/image-import" - OpenshiftImageSignatureImportController OpenShiftControllerName = "openshift.io/image-signature-import" - OpenshiftTemplateInstanceController OpenShiftControllerName = 
"openshift.io/templateinstance" - OpenshiftTemplateInstanceFinalizerController OpenShiftControllerName = "openshift.io/templateinstancefinalizer" - OpenshiftUnidlingController OpenShiftControllerName = "openshift.io/unidling" + OpenShiftDeploymentConfigController OpenShiftControllerName = "openshift.io/deploymentconfig" + OpenShiftImageTriggerController OpenShiftControllerName = "openshift.io/image-trigger" + OpenShiftImageImportController OpenShiftControllerName = "openshift.io/image-import" + OpenShiftImageSignatureImportController OpenShiftControllerName = "openshift.io/image-signature-import" + OpenShiftTemplateInstanceController OpenShiftControllerName = "openshift.io/templateinstance" + OpenShiftTemplateInstanceFinalizerController OpenShiftControllerName = "openshift.io/templateinstancefinalizer" + OpenShiftUnidlingController OpenShiftControllerName = "openshift.io/unidling" + OpenShiftIngressIPController OpenShiftControllerName = "openshift.io/ingress-ip" + OpenShiftIngressToRouteController OpenShiftControllerName = "openshift.io/ingress-to-route" ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/vendor/github.com/openshift/api/operator/v1/0000_40_cloud-credential-operator_00_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_40_cloud-credential-operator_00_config.crd.yaml index bce7c8c7ee..7b0220d438 100644 --- a/vendor/github.com/openshift/api/operator/v1/0000_40_cloud-credential-operator_00_config.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/0000_40_cloud-credential-operator_00_config.crd.yaml @@ -2,6 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: + capability.openshift.io/name: CloudCredential api-approved.openshift.io: https://github.com/openshift/api/pull/692 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" diff --git a/vendor/github.com/openshift/api/operator/v1/0000_50_ingress-operator_00-ingresscontroller.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_50_ingress-operator_00-ingresscontroller.crd.yaml index a5be103ac1..c5188cc078 100644 --- a/vendor/github.com/openshift/api/operator/v1/0000_50_ingress-operator_00-ingresscontroller.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/0000_50_ingress-operator_00-ingresscontroller.crd.yaml @@ -507,11 +507,6 @@ spec: required: - type type: object - x-kubernetes-validations: - - message: container cannot be specified unless type is Container - rule: '!has(self.type) || self.type == "Container" || !has(self.container) || !has(self.container.maxLength) || self.container.maxLength == 1024' - - message: container and syslog cannot both be specified at the same time - rule: '!has(self.container) || !has(self.container.maxLength) || self.container.maxLength == 1024 || !has(self.syslog)' httpCaptureCookies: description: httpCaptureCookies specifies HTTP cookies that should be captured in access logs. If this field is empty, no cookies are captured. items: @@ -747,7 +742,7 @@ spec: description: "tlsSecurityProfile specifies settings for TLS connections for ingresscontrollers. \n If unset, the default is based on the apiservers.config.openshift.io/cluster resource. \n Note that when using the Old, Intermediate, and Modern profile types, the effective profile configuration is subject to change between releases. 
For example, given a specification to use the Intermediate profile deployed on release X.Y.Z, an upgrade to release X.Y.Z+1 may cause a new profile configuration to be applied to the ingress controller, resulting in a rollout." properties: custom: - description: "custom is a user-defined TLS security profile. Be extremely careful using a custom profile as invalid configurations can be catastrophic. An example custom profile looks like this: \n ciphers: - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 minTLSVersion: TLSv1.1" + description: "custom is a user-defined TLS security profile. Be extremely careful using a custom profile as invalid configurations can be catastrophic. An example custom profile looks like this: \n ciphers: - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 minTLSVersion: VersionTLS11" nullable: true properties: ciphers: @@ -756,7 +751,7 @@ spec: type: string type: array minTLSVersion: - description: "minTLSVersion is used to specify the minimal version of the TLS protocol that is negotiated during the TLS handshake. For example, to use TLS versions 1.1, 1.2 and 1.3 (yaml): \n minTLSVersion: TLSv1.1 \n NOTE: currently the highest minTLSVersion allowed is VersionTLS12" + description: "minTLSVersion is used to specify the minimal version of the TLS protocol that is negotiated during the TLS handshake. For example, to use TLS versions 1.1, 1.2 and 1.3 (yaml): \n minTLSVersion: VersionTLS11 \n NOTE: currently the highest minTLSVersion allowed is VersionTLS12" enum: - VersionTLS10 - VersionTLS11 @@ -765,15 +760,15 @@ spec: type: string type: object intermediate: - description: "intermediate is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29 \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384 - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384 minTLSVersion: TLSv1.2" + description: "intermediate is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29 \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384 - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384 minTLSVersion: VersionTLS12" nullable: true type: object modern: - description: "modern is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 minTLSVersion: TLSv1.3 \n NOTE: Currently unsupported." + description: "modern is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 minTLSVersion: VersionTLS13 \n NOTE: Currently unsupported." 
nullable: true type: object old: - description: "old is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384 - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384 - DHE-RSA-CHACHA20-POLY1305 - ECDHE-ECDSA-AES128-SHA256 - ECDHE-RSA-AES128-SHA256 - ECDHE-ECDSA-AES128-SHA - ECDHE-RSA-AES128-SHA - ECDHE-ECDSA-AES256-SHA384 - ECDHE-RSA-AES256-SHA384 - ECDHE-ECDSA-AES256-SHA - ECDHE-RSA-AES256-SHA - DHE-RSA-AES128-SHA256 - DHE-RSA-AES256-SHA256 - AES128-GCM-SHA256 - AES256-GCM-SHA384 - AES128-SHA256 - AES256-SHA256 - AES128-SHA - AES256-SHA - DES-CBC3-SHA minTLSVersion: TLSv1.0" + description: "old is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384 - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384 - DHE-RSA-CHACHA20-POLY1305 - ECDHE-ECDSA-AES128-SHA256 - ECDHE-RSA-AES128-SHA256 - ECDHE-ECDSA-AES128-SHA - ECDHE-RSA-AES128-SHA - ECDHE-ECDSA-AES256-SHA384 - ECDHE-RSA-AES256-SHA384 - ECDHE-ECDSA-AES256-SHA - ECDHE-RSA-AES256-SHA - DHE-RSA-AES128-SHA256 - DHE-RSA-AES256-SHA256 - AES128-GCM-SHA256 - AES256-GCM-SHA384 - AES128-SHA256 - AES256-SHA256 - AES128-SHA - AES256-SHA - DES-CBC3-SHA minTLSVersion: VersionTLS10" nullable: true type: object type: @@ -1122,7 +1117,7 @@ spec: type: string type: array minTLSVersion: - description: "minTLSVersion is used to specify the minimal version of the TLS protocol that is negotiated during the TLS handshake. For example, to use TLS versions 1.1, 1.2 and 1.3 (yaml): \n minTLSVersion: TLSv1.1 \n NOTE: currently the highest minTLSVersion allowed is VersionTLS12" + description: "minTLSVersion is used to specify the minimal version of the TLS protocol that is negotiated during the TLS handshake. 
For example, to use TLS versions 1.1, 1.2 and 1.3 (yaml): \n minTLSVersion: VersionTLS11 \n NOTE: currently the highest minTLSVersion allowed is VersionTLS12" enum: - VersionTLS10 - VersionTLS11 diff --git a/vendor/github.com/openshift/api/operator/v1/0000_70_cluster-network-operator_01.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_70_cluster-network-operator_01-CustomNoUpgrade.crd.yaml similarity index 89% rename from vendor/github.com/openshift/api/operator/v1/0000_70_cluster-network-operator_01.crd.yaml rename to vendor/github.com/openshift/api/operator/v1/0000_70_cluster-network-operator_01-CustomNoUpgrade.crd.yaml index 83222ab78c..4938ac2797 100644 --- a/vendor/github.com/openshift/api/operator/v1/0000_70_cluster-network-operator_01.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/0000_70_cluster-network-operator_01-CustomNoUpgrade.crd.yaml @@ -5,6 +5,7 @@ metadata: api-approved.openshift.io: https://github.com/openshift/api/pull/475 include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + release.openshift.io/feature-set: CustomNoUpgrade name: networks.operator.openshift.io spec: group: operator.openshift.io @@ -138,43 +139,6 @@ spec: description: defaultNetwork is the "default" network that all pods will receive type: object properties: - kuryrConfig: - description: KuryrConfig configures the kuryr plugin - type: object - properties: - controllerProbesPort: - description: The port kuryr-controller will listen for readiness and liveness requests. - type: integer - format: int32 - minimum: 0 - daemonProbesPort: - description: The port kuryr-daemon will listen for readiness and liveness requests. - type: integer - format: int32 - minimum: 0 - enablePortPoolsPrepopulation: - description: enablePortPoolsPrepopulation when true will make Kuryr prepopulate each newly created port pool with a minimum number of ports. Kuryr uses Neutron port pooling to fight the fact that it takes a significant amount of time to create one. It creates a number of ports when the first pod that is configured to use the dedicated network for pods is created in a namespace, and keeps them ready to be attached to pods. Port prepopulation is disabled by default. - type: boolean - mtu: - description: mtu is the MTU that Kuryr should use when creating pod networks in Neutron. The value has to be lower or equal to the MTU of the nodes network and Neutron has to allow creation of tenant networks with such MTU. If unset Pod networks will be created with the same MTU as the nodes network has. This also affects the services network created by cluster-network-operator. - type: integer - format: int32 - minimum: 0 - openStackServiceNetwork: - description: openStackServiceNetwork contains the CIDR of network from which to allocate IPs for OpenStack Octavia's Amphora VMs. Please note that with Amphora driver Octavia uses two IPs from that network for each loadbalancer - one given by OpenShift and second for VRRP connections. As the first one is managed by OpenShift's and second by Neutron's IPAMs, those need to come from different pools. Therefore `openStackServiceNetwork` needs to be at least twice the size of `serviceNetwork`, and whole `serviceNetwork` must be overlapping with `openStackServiceNetwork`. cluster-network-operator will then make sure VRRP IPs are taken from the ranges inside `openStackServiceNetwork` that are not overlapping with `serviceNetwork`, effectivly preventing conflicts. 
If not set cluster-network-operator will use `serviceNetwork` expanded by decrementing the prefix size by 1. - type: string - poolBatchPorts: - description: poolBatchPorts sets a number of ports that should be created in a single batch request to extend the port pool. The default is 3. For more information about port pools see enablePortPoolsPrepopulation setting. - type: integer - minimum: 0 - poolMaxPorts: - description: poolMaxPorts sets a maximum number of free ports that are being kept in a port pool. If the number of ports exceeds this setting, free ports will get deleted. Setting 0 will disable this upper bound, effectively preventing pools from shrinking and this is the default value. For more information about port pools see enablePortPoolsPrepopulation setting. - type: integer - minimum: 0 - poolMinPorts: - description: poolMinPorts sets a minimum number of free ports that should be kept in a port pool. If the number of ports is lower than this setting, new ports will get created and added to pool. The default is 1. For more information about port pools see enablePortPoolsPrepopulation setting. - type: integer - minimum: 1 openshiftSDNConfig: description: openShiftSDNConfig configures the openshift-sdn plugin type: object @@ -254,21 +218,21 @@ spec: message: IPv6 addresses must contain at most one '::' and may only be shortened once - rule: 'self.contains(''::'') ? self.split(''/'')[0].split('':'').size() <= 8 : self.split(''/'')[0].split('':'').size() == 8' message: a valid IPv6 address must contain 8 segments unless elided (::), in which case it must contain at most 6 non-empty segments - - rule: 'self.split(''/'')[0].split('':'').size() >=1 ? [self.split(''/'')[0].split('':'', 8)[0]].all(x, x == '''' || x.matches(''[0-9A-Fa-f]{1,4}'')) : true' + - rule: 'self.split(''/'')[0].split('':'').size() >=1 ? [self.split(''/'')[0].split('':'', 8)[0]].all(x, x == '''' || (x.matches(''^[0-9A-Fa-f]{1,4}$'')) && size(x)<5 ) : true' message: each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 1 - - rule: 'self.split(''/'')[0].split('':'').size() >=2 ? [self.split(''/'')[0].split('':'', 8)[1]].all(x, x == '''' || x.matches(''[0-9A-Fa-f]{1,4}'')) : true' + - rule: 'self.split(''/'')[0].split('':'').size() >=2 ? [self.split(''/'')[0].split('':'', 8)[1]].all(x, x == '''' || (x.matches(''^[0-9A-Fa-f]{1,4}$'')) && size(x)<5 ) : true' message: each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 2 - - rule: 'self.split(''/'')[0].split('':'').size() >=3 ? [self.split(''/'')[0].split('':'', 8)[2]].all(x, x == '''' || x.matches(''[0-9A-Fa-f]{1,4}'')) : true' + - rule: 'self.split(''/'')[0].split('':'').size() >=3 ? [self.split(''/'')[0].split('':'', 8)[2]].all(x, x == '''' || (x.matches(''^[0-9A-Fa-f]{1,4}$'')) && size(x)<5 ) : true' message: each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 3 - - rule: 'self.split(''/'')[0].split('':'').size() >=4 ? [self.split(''/'')[0].split('':'', 8)[3]].all(x, x == '''' || x.matches(''[0-9A-Fa-f]{1,4}'')) : true' + - rule: 'self.split(''/'')[0].split('':'').size() >=4 ? [self.split(''/'')[0].split('':'', 8)[3]].all(x, x == '''' || (x.matches(''^[0-9A-Fa-f]{1,4}$'')) && size(x)<5 ) : true' message: each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 4 - - rule: 'self.split(''/'')[0].split('':'').size() >=5 ? 
[self.split(''/'')[0].split('':'', 8)[4]].all(x, x == '''' || x.matches(''[0-9A-Fa-f]{1,4}'')) : true' + - rule: 'self.split(''/'')[0].split('':'').size() >=5 ? [self.split(''/'')[0].split('':'', 8)[4]].all(x, x == '''' || (x.matches(''^[0-9A-Fa-f]{1,4}$'')) && size(x)<5 ) : true' message: each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 5 - - rule: 'self.split(''/'')[0].split('':'').size() >=6 ? [self.split(''/'')[0].split('':'', 8)[5]].all(x, x == '''' || x.matches(''[0-9A-Fa-f]{1,4}'')) : true' + - rule: 'self.split(''/'')[0].split('':'').size() >=6 ? [self.split(''/'')[0].split('':'', 8)[5]].all(x, x == '''' || (x.matches(''^[0-9A-Fa-f]{1,4}$'')) && size(x)<5 ) : true' message: each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 6 - - rule: 'self.split(''/'')[0].split('':'').size() >=7 ? [self.split(''/'')[0].split('':'', 8)[6]].all(x, x == '''' || x.matches(''[0-9A-Fa-f]{1,4}'')) : true' + - rule: 'self.split(''/'')[0].split('':'').size() >=7 ? [self.split(''/'')[0].split('':'', 8)[6]].all(x, x == '''' || (x.matches(''^[0-9A-Fa-f]{1,4}$'')) && size(x)<5 ) : true' message: each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 7 - - rule: 'self.split(''/'')[0].split('':'').size() >=8 ? [self.split(''/'')[0].split('':'', 8)[7]].all(x, x == '''' || x.matches(''[0-9A-Fa-f]{1,4}'')) : true' + - rule: 'self.split(''/'')[0].split('':'').size() >=8 ? [self.split(''/'')[0].split('':'', 8)[7]].all(x, x == '''' || (x.matches(''^[0-9A-Fa-f]{1,4}$'')) && size(x)<5 ) : true' message: each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 8 - rule: '!self.contains(''.'')' message: IPv6 dual addresses are not permitted, value should not contain `.` characters @@ -451,6 +415,13 @@ spec: description: multicast specifies whether or not the multicast configuration is migrated automatically when changing the cluster default network provider. If unset, this property defaults to 'true' and multicast configure is migrated. type: boolean default: true + mode: + description: mode indicates the mode of network migration. The supported values are "Live", "Offline" and omitted. A "Live" migration operation will not cause service interruption by migrating the CNI of each node one by one. The cluster network will work as normal during the network migration. An "Offline" migration operation will cause service interruption. During an "Offline" migration, two rounds of node reboots are required. The cluster network will be malfunctioning during the network migration. When omitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time. The current default value is "Offline". + type: string + enum: + - Live + - Offline + - "" mtu: description: mtu contains the MTU migration configuration. Set this to allow changing the MTU values for the default network. If unset, the operation of changing the MTU for the default network will be rejected. type: object @@ -486,6 +457,9 @@ spec: networkType: description: networkType is the target type of network migration. Set this to the target network type to allow changing the default network. If unset, the operation of changing cluster default network plugin will be rejected. 
The supported values are OpenShiftSDN, OVNKubernetes type: string + x-kubernetes-validations: + - rule: '!has(self.mtu) || !has(self.networkType) || self.networkType == '''' || has(self.mode) && self.mode == ''Live''' + message: networkType migration in mode other than 'Live' may not be configured at the same time as mtu migration observedConfig: description: observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator type: object diff --git a/vendor/github.com/openshift/api/operator/v1/0000_70_cluster-network-operator_01-Default.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_70_cluster-network-operator_01-Default.crd.yaml new file mode 100644 index 0000000000..83849f24b7 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_70_cluster-network-operator_01-Default.crd.yaml @@ -0,0 +1,541 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/475 + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + release.openshift.io/feature-set: Default + name: networks.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: Network + listKind: NetworkList + plural: networks + singular: network + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "Network describes the cluster's desired network configuration. It is consumed by the cluster-network-operator. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: NetworkSpec is the top-level network configuration object. + type: object + properties: + additionalNetworks: + description: additionalNetworks is a list of extra networks to make available to pods when multiple networks are enabled. + type: array + items: + description: AdditionalNetworkDefinition configures an extra network that is available but not created by default. Instead, pods must request them by name. type must be specified, along with exactly one "Config" that matches the type. + type: object + properties: + name: + description: name is the name of the network. This will be populated in the resulting CRD This must be unique. + type: string + namespace: + description: namespace is the namespace of the network. This will be populated in the resulting CRD If not given the network will be created in the default namespace. 
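The new migration.mode field and the CEL rule above (added to the CustomNoUpgrade copy of the networks.operator.openshift.io CRD) only allow an MTU migration to be combined with a networkType migration when the mode is "Live". A minimal sketch of a live migration request against this schema; the singleton object name "cluster" is assumed and not stated in this change.

apiVersion: operator.openshift.io/v1
kind: Network
metadata:
  name: cluster                      # assumed singleton name for the operator config
spec:
  migration:
    networkType: OVNKubernetes       # supported values per the CRD: OpenShiftSDN, OVNKubernetes
    mode: Live                       # required by the new CEL rule if mtu migration is also set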
+ type: string + rawCNIConfig: + description: rawCNIConfig is the raw CNI configuration json to create in the NetworkAttachmentDefinition CRD + type: string + simpleMacvlanConfig: + description: SimpleMacvlanConfig configures the macvlan interface in case of type:NetworkTypeSimpleMacvlan + type: object + properties: + ipamConfig: + description: IPAMConfig configures IPAM module will be used for IP Address Management (IPAM). + type: object + properties: + staticIPAMConfig: + description: StaticIPAMConfig configures the static IP address in case of type:IPAMTypeStatic + type: object + properties: + addresses: + description: Addresses configures IP address for the interface + type: array + items: + description: StaticIPAMAddresses provides IP address and Gateway for static IPAM addresses + type: object + properties: + address: + description: Address is the IP address in CIDR format + type: string + gateway: + description: Gateway is IP inside of subnet to designate as the gateway + type: string + dns: + description: DNS configures DNS for the interface + type: object + properties: + domain: + description: Domain configures the domainname the local domain used for short hostname lookups + type: string + nameservers: + description: Nameservers points DNS servers for IP lookup + type: array + items: + type: string + search: + description: Search configures priority ordered search domains for short hostname lookups + type: array + items: + type: string + routes: + description: Routes configures IP routes for the interface + type: array + items: + description: StaticIPAMRoutes provides Destination/Gateway pairs for static IPAM routes + type: object + properties: + destination: + description: Destination points the IP route destination + type: string + gateway: + description: Gateway is the route's next-hop IP address If unset, a default gateway is assumed (as determined by the CNI plugin). + type: string + type: + description: Type is the type of IPAM module will be used for IP Address Management(IPAM). The supported values are IPAMTypeDHCP, IPAMTypeStatic + type: string + master: + description: master is the host interface to create the macvlan interface from. If not specified, it will be default route interface + type: string + mode: + description: 'mode is the macvlan mode: bridge, private, vepa, passthru. The default is bridge' + type: string + mtu: + description: mtu is the mtu to use for the macvlan interface. if unset, host's kernel will select the value. + type: integer + format: int32 + minimum: 0 + type: + description: type is the type of network The supported values are NetworkTypeRaw, NetworkTypeSimpleMacvlan + type: string + clusterNetwork: + description: clusterNetwork is the IP address pool to use for pod IPs. Some network providers, e.g. OpenShift SDN, support multiple ClusterNetworks. Others only support one. This is equivalent to the cluster-cidr. + type: array + items: + description: ClusterNetworkEntry is a subnet from which to allocate PodIPs. A network of size HostPrefix (in CIDR notation) will be allocated when nodes join the cluster. If the HostPrefix field is not used by the plugin, it can be left unset. 
Not all network providers support multiple ClusterNetworks + type: object + properties: + cidr: + type: string + hostPrefix: + type: integer + format: int32 + minimum: 0 + defaultNetwork: + description: defaultNetwork is the "default" network that all pods will receive + type: object + properties: + openshiftSDNConfig: + description: openShiftSDNConfig configures the openshift-sdn plugin + type: object + properties: + enableUnidling: + description: enableUnidling controls whether or not the service proxy will support idling and unidling of services. By default, unidling is enabled. + type: boolean + mode: + description: mode is one of "Multitenant", "Subnet", or "NetworkPolicy" + type: string + mtu: + description: mtu is the mtu to use for the tunnel interface. Defaults to 1450 if unset. This must be 50 bytes smaller than the machine's uplink. + type: integer + format: int32 + minimum: 0 + useExternalOpenvswitch: + description: 'useExternalOpenvswitch used to control whether the operator would deploy an OVS DaemonSet itself or expect someone else to start OVS. As of 4.6, OVS is always run as a system service, and this flag is ignored. DEPRECATED: non-functional as of 4.6' + type: boolean + vxlanPort: + description: vxlanPort is the port to use for all vxlan packets. The default is 4789. + type: integer + format: int32 + minimum: 0 + ovnKubernetesConfig: + description: ovnKubernetesConfig configures the ovn-kubernetes plugin. + type: object + properties: + egressIPConfig: + description: egressIPConfig holds the configuration for EgressIP options. + type: object + properties: + reachabilityTotalTimeoutSeconds: + description: reachabilityTotalTimeout configures the EgressIP node reachability check total timeout in seconds. If the EgressIP node cannot be reached within this timeout, the node is declared down. Setting a large value may cause the EgressIP feature to react slowly to node changes. In particular, it may react slowly for EgressIP nodes that really have a genuine problem and are unreachable. When omitted, this means the user has no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is 1 second. A value of 0 disables the EgressIP node's reachability check. + type: integer + format: int32 + maximum: 60 + minimum: 0 + gatewayConfig: + description: gatewayConfig holds the configuration for node gateway options. + type: object + properties: + ipForwarding: + description: IPForwarding controls IP forwarding for all traffic on OVN-Kubernetes managed interfaces (such as br-ex). By default this is set to Restricted, and Kubernetes related traffic is still forwarded appropriately, but other IP traffic will not be routed by the OCP node. If there is a desire to allow the host to forward traffic across OVN-Kubernetes managed interfaces, then set this field to "Global". The supported values are "Restricted" and "Global". + type: string + ipv4: + description: ipv4 allows users to configure IP settings for IPv4 connections. When omitted, this means no opinion and the default configuration is used. Check individual members fields within ipv4 for details of default values. + type: object + properties: + internalMasqueradeSubnet: + description: internalMasqueradeSubnet contains the masquerade addresses in IPV4 CIDR format used internally by ovn-kubernetes to enable host to service traffic. Each host in the cluster is configured with these addresses, as well as the shared gateway bridge interface. 
The values can be changed after installation. The subnet chosen should not overlap with other networks specified for OVN-Kubernetes as well as other networks used on the host. Additionally the subnet must be large enough to accommodate 6 IPs (maximum prefix length /29). When omitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time. The current default subnet is 169.254.169.0/29 The value must be in proper IPV4 CIDR format + type: string + maxLength: 18 + x-kubernetes-validations: + - rule: self.indexOf('/') == self.lastIndexOf('/') + message: CIDR format must contain exactly one '/' + - rule: '[int(self.split(''/'')[1])].all(x, x <= 29 && x >= 0)' + message: subnet must be in the range /0 to /29 inclusive + - rule: self.split('/')[0].split('.').size() == 4 + message: a valid IPv4 address must contain 4 octets + - rule: '[self.findAll(''[0-9]+'')[0]].all(x, x != ''0'' && int(x) <= 255 && !x.startsWith(''0''))' + message: first IP address octet must not contain leading zeros, must be greater than 0 and less or equal to 255 + - rule: '[self.findAll(''[0-9]+'')[1], self.findAll(''[0-9]+'')[2], self.findAll(''[0-9]+'')[3]].all(x, int(x) <= 255 && (x == ''0'' || !x.startsWith(''0'')))' + message: IP address octets must not contain leading zeros, and must be less or equal to 255 + ipv6: + description: ipv6 allows users to configure IP settings for IPv6 connections. When omitted, this means no opinion and the default configuration is used. Check individual members fields within ipv6 for details of default values. + type: object + properties: + internalMasqueradeSubnet: + description: internalMasqueradeSubnet contains the masquerade addresses in IPV6 CIDR format used internally by ovn-kubernetes to enable host to service traffic. Each host in the cluster is configured with these addresses, as well as the shared gateway bridge interface. The values can be changed after installation. The subnet chosen should not overlap with other networks specified for OVN-Kubernetes as well as other networks used on the host. Additionally the subnet must be large enough to accommodate 6 IPs (maximum prefix length /125). When omitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time. The current default subnet is fd69::/125 Note that IPV6 dual addresses are not permitted + type: string + x-kubernetes-validations: + - rule: self.indexOf('/') == self.lastIndexOf('/') + message: CIDR format must contain exactly one '/' + - rule: self.split('/').size() == 2 && [int(self.split('/')[1])].all(x, x <= 125 && x >= 0) + message: subnet must be in the range /0 to /125 inclusive + - rule: self.indexOf('::') == self.lastIndexOf('::') + message: IPv6 addresses must contain at most one '::' and may only be shortened once + - rule: 'self.contains(''::'') ? self.split(''/'')[0].split('':'').size() <= 8 : self.split(''/'')[0].split('':'').size() == 8' + message: a valid IPv6 address must contain 8 segments unless elided (::), in which case it must contain at most 6 non-empty segments + - rule: 'self.split(''/'')[0].split('':'').size() >=1 ? [self.split(''/'')[0].split('':'', 8)[0]].all(x, x == '''' || (x.matches(''^[0-9A-Fa-f]{1,4}$'')) && size(x)<5 ) : true' + message: each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 1 + - rule: 'self.split(''/'')[0].split('':'').size() >=2 ? 
[self.split(''/'')[0].split('':'', 8)[1]].all(x, x == '''' || (x.matches(''^[0-9A-Fa-f]{1,4}$'')) && size(x)<5 ) : true' + message: each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 2 + - rule: 'self.split(''/'')[0].split('':'').size() >=3 ? [self.split(''/'')[0].split('':'', 8)[2]].all(x, x == '''' || (x.matches(''^[0-9A-Fa-f]{1,4}$'')) && size(x)<5 ) : true' + message: each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 3 + - rule: 'self.split(''/'')[0].split('':'').size() >=4 ? [self.split(''/'')[0].split('':'', 8)[3]].all(x, x == '''' || (x.matches(''^[0-9A-Fa-f]{1,4}$'')) && size(x)<5 ) : true' + message: each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 4 + - rule: 'self.split(''/'')[0].split('':'').size() >=5 ? [self.split(''/'')[0].split('':'', 8)[4]].all(x, x == '''' || (x.matches(''^[0-9A-Fa-f]{1,4}$'')) && size(x)<5 ) : true' + message: each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 5 + - rule: 'self.split(''/'')[0].split('':'').size() >=6 ? [self.split(''/'')[0].split('':'', 8)[5]].all(x, x == '''' || (x.matches(''^[0-9A-Fa-f]{1,4}$'')) && size(x)<5 ) : true' + message: each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 6 + - rule: 'self.split(''/'')[0].split('':'').size() >=7 ? [self.split(''/'')[0].split('':'', 8)[6]].all(x, x == '''' || (x.matches(''^[0-9A-Fa-f]{1,4}$'')) && size(x)<5 ) : true' + message: each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 7 + - rule: 'self.split(''/'')[0].split('':'').size() >=8 ? [self.split(''/'')[0].split('':'', 8)[7]].all(x, x == '''' || (x.matches(''^[0-9A-Fa-f]{1,4}$'')) && size(x)<5 ) : true' + message: each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 8 + - rule: '!self.contains(''.'')' + message: IPv6 dual addresses are not permitted, value should not contain `.` characters + routingViaHost: + description: RoutingViaHost allows pod egress traffic to exit via the ovn-k8s-mp0 management port into the host before sending it out. If this is not set, traffic will always egress directly from OVN to outside without touching the host stack. Setting this to true means hardware offload will not be supported. Default is false if GatewayConfig is specified. + type: boolean + default: false + genevePort: + description: geneve port is the UDP port to be used by geneve encapulation. Default is 6081 + type: integer + format: int32 + minimum: 1 + hybridOverlayConfig: + description: HybridOverlayConfig configures an additional overlay network for peers that are not using OVN. + type: object + properties: + hybridClusterNetwork: + description: HybridClusterNetwork defines a network space given to nodes on an additional overlay network. + type: array + items: + description: ClusterNetworkEntry is a subnet from which to allocate PodIPs. A network of size HostPrefix (in CIDR notation) will be allocated when nodes join the cluster. If the HostPrefix field is not used by the plugin, it can be left unset. Not all network providers support multiple ClusterNetworks + type: object + properties: + cidr: + type: string + hostPrefix: + type: integer + format: int32 + minimum: 0 + hybridOverlayVXLANPort: + description: HybridOverlayVXLANPort defines the VXLAN port number to be used by the additional overlay network. 
Default is 4789 + type: integer + format: int32 + ipsecConfig: + description: ipsecConfig enables and configures IPsec for pods on the pod network within the cluster. + type: object + mtu: + description: mtu is the MTU to use for the tunnel interface. This must be 100 bytes smaller than the uplink mtu. Default is 1400 + type: integer + format: int32 + minimum: 0 + policyAuditConfig: + description: policyAuditConfig is the configuration for network policy audit events. If unset, reported defaults are used. + type: object + properties: + destination: + description: 'destination is the location for policy log messages. Regardless of this config, persistent logs will always be dumped to the host at /var/log/ovn/ however Additionally syslog output may be configured as follows. Valid values are: - "libc" -> to use the libc syslog() function of the host node''s journdald process - "udp:host:port" -> for sending syslog over UDP - "unix:file" -> for using the UNIX domain socket directly - "null" -> to discard all messages logged to syslog The default is "null"' + type: string + default: "null" + maxFileSize: + description: maxFilesSize is the max size an ACL_audit log file is allowed to reach before rotation occurs Units are in MB and the Default is 50MB + type: integer + format: int32 + default: 50 + minimum: 1 + maxLogFiles: + description: maxLogFiles specifies the maximum number of ACL_audit log files that can be present. + type: integer + format: int32 + default: 5 + minimum: 1 + rateLimit: + description: rateLimit is the approximate maximum number of messages to generate per-second per-node. If unset the default of 20 msg/sec is used. + type: integer + format: int32 + default: 20 + minimum: 1 + syslogFacility: + description: syslogFacility the RFC5424 facility for generated messages, e.g. "kern". Default is "local0" + type: string + default: local0 + v4InternalSubnet: + description: v4InternalSubnet is a v4 subnet used internally by ovn-kubernetes in case the default one is being already used by something else. It must not overlap with any other subnet being used by OpenShift or by the node network. The size of the subnet must be larger than the number of nodes. The value cannot be changed after installation. Default is 100.64.0.0/16 + type: string + v6InternalSubnet: + description: v6InternalSubnet is a v6 subnet used internally by ovn-kubernetes in case the default one is being already used by something else. It must not overlap with any other subnet being used by OpenShift or by the node network. The size of the subnet must be larger than the number of nodes. The value cannot be changed after installation. Default is fd98::/48 + type: string + type: + description: type is the type of network All NetworkTypes are supported except for NetworkTypeRaw + type: string + deployKubeProxy: + description: deployKubeProxy specifies whether or not a standalone kube-proxy should be deployed by the operator. Some network providers include kube-proxy or similar functionality. If unset, the plugin will attempt to select the correct value, which is false when OpenShift SDN and ovn-kubernetes are used and true otherwise. + type: boolean + disableMultiNetwork: + description: disableMultiNetwork specifies whether or not multiple pod network support should be disabled. If unset, this property defaults to 'false' and multiple network support is enabled. 
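As a point of reference for the gatewayConfig fields carried into this new Default-feature-set copy of the CRD, a minimal sketch that simply spells out the documented defaults (Restricted IP forwarding, routingViaHost false, the 169.254.169.0/29 IPv4 masquerade subnet). The object name "cluster" is again an assumption.

apiVersion: operator.openshift.io/v1
kind: Network
metadata:
  name: cluster
spec:
  defaultNetwork:
    type: OVNKubernetes
    ovnKubernetesConfig:
      gatewayConfig:
        ipForwarding: Restricted             # documented default; "Global" enables host forwarding
        routingViaHost: false                # documented default
        ipv4:
          internalMasqueradeSubnet: 169.254.169.0/29   # documented current default subnet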
+ type: boolean + disableNetworkDiagnostics: + description: disableNetworkDiagnostics specifies whether or not PodNetworkConnectivityCheck CRs from a test pod to every node, apiserver and LB should be disabled or not. If unset, this property defaults to 'false' and network diagnostics is enabled. Setting this to 'true' would reduce the additional load of the pods performing the checks. + type: boolean + default: false + exportNetworkFlows: + description: exportNetworkFlows enables and configures the export of network flow metadata from the pod network by using protocols NetFlow, SFlow or IPFIX. Currently only supported on OVN-Kubernetes plugin. If unset, flows will not be exported to any collector. + type: object + properties: + ipfix: + description: ipfix defines IPFIX configuration. + type: object + properties: + collectors: + description: ipfixCollectors is list of strings formatted as ip:port with a maximum of ten items + type: array + maxItems: 10 + minItems: 1 + items: + type: string + pattern: ^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5]):([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$ + netFlow: + description: netFlow defines the NetFlow configuration. + type: object + properties: + collectors: + description: netFlow defines the NetFlow collectors that will consume the flow data exported from OVS. It is a list of strings formatted as ip:port with a maximum of ten items + type: array + maxItems: 10 + minItems: 1 + items: + type: string + pattern: ^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5]):([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$ + sFlow: + description: sFlow defines the SFlow configuration. + type: object + properties: + collectors: + description: sFlowCollectors is list of strings formatted as ip:port with a maximum of ten items + type: array + maxItems: 10 + minItems: 1 + items: + type: string + pattern: ^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5]):([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$ + kubeProxyConfig: + description: kubeProxyConfig lets us configure desired proxy configuration. If not specified, sensible defaults will be chosen by OpenShift directly. Not consumed by all network providers - currently only openshift-sdn. + type: object + properties: + bindAddress: + description: The address to "bind" on Defaults to 0.0.0.0 + type: string + iptablesSyncPeriod: + description: 'An internal kube-proxy parameter. In older releases of OCP, this sometimes needed to be adjusted in large clusters for performance reasons, but this is no longer necessary, and there is no reason to change this from the default value. Default: 30s' + type: string + proxyArguments: + description: Any additional arguments to pass to the kubeproxy process + type: object + additionalProperties: + description: ProxyArgumentList is a list of arguments to pass to the kubeproxy process + type: array + items: + type: string + logLevel: + description: "logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." 
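The exportNetworkFlows collectors above are plain "ip:port" strings checked by the pattern shown (an IPv4 address, a port between 1 and 65535, at most ten entries). A minimal sketch with a single NetFlow collector; the collector address is purely illustrative.

apiVersion: operator.openshift.io/v1
kind: Network
metadata:
  name: cluster
spec:
  exportNetworkFlows:
    netFlow:
      collectors:
      - 192.168.1.99:2056        # illustrative ip:port, must match the CRD pattern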
+ type: string + default: Normal + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + managementState: + description: managementState indicates whether and how the operator should manage the component + type: string + pattern: ^(Managed|Unmanaged|Force|Removed)$ + migration: + description: migration enables and configures the cluster network migration. The migration procedure allows to change the network type and the MTU. + type: object + properties: + features: + description: features contains the features migration configuration. Set this to migrate feature configuration when changing the cluster default network provider. if unset, the default operation is to migrate all the configuration of supported features. + type: object + properties: + egressFirewall: + description: egressFirewall specifies whether or not the Egress Firewall configuration is migrated automatically when changing the cluster default network provider. If unset, this property defaults to 'true' and Egress Firewall configure is migrated. + type: boolean + default: true + egressIP: + description: egressIP specifies whether or not the Egress IP configuration is migrated automatically when changing the cluster default network provider. If unset, this property defaults to 'true' and Egress IP configure is migrated. + type: boolean + default: true + multicast: + description: multicast specifies whether or not the multicast configuration is migrated automatically when changing the cluster default network provider. If unset, this property defaults to 'true' and multicast configure is migrated. + type: boolean + default: true + mtu: + description: mtu contains the MTU migration configuration. Set this to allow changing the MTU values for the default network. If unset, the operation of changing the MTU for the default network will be rejected. + type: object + properties: + machine: + description: machine contains MTU migration configuration for the machine's uplink. Needs to be migrated along with the default network MTU unless the current uplink MTU already accommodates the default network MTU. + type: object + properties: + from: + description: from is the MTU to migrate from. + type: integer + format: int32 + minimum: 0 + to: + description: to is the MTU to migrate to. + type: integer + format: int32 + minimum: 0 + network: + description: network contains information about MTU migration for the default network. Migrations are only allowed to MTU values lower than the machine's uplink MTU by the minimum appropriate offset. + type: object + properties: + from: + description: from is the MTU to migrate from. + type: integer + format: int32 + minimum: 0 + to: + description: to is the MTU to migrate to. + type: integer + format: int32 + minimum: 0 + networkType: + description: networkType is the target type of network migration. Set this to the target network type to allow changing the default network. If unset, the operation of changing cluster default network plugin will be rejected. The supported values are OpenShiftSDN, OVNKubernetes + type: string + observedConfig: + description: observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + description: "operatorLogLevel is an intent based logging for the operator itself. 
It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + type: string + default: Normal + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + serviceNetwork: + description: serviceNetwork is the ip address pool to use for Service IPs Currently, all existing network providers only support a single value here, but this is an array to allow for growth. + type: array + items: + type: string + unsupportedConfigOverrides: + description: unsupportedConfigOverrides overrides the final configuration that was computed by the operator. Red Hat does not support the use of this field. Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from the Red Hat support before using this field. Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + useMultiNetworkPolicy: + description: useMultiNetworkPolicy enables a controller which allows for MultiNetworkPolicy objects to be used on additional networks as created by Multus CNI. MultiNetworkPolicy are similar to NetworkPolicy objects, but NetworkPolicy objects only apply to the primary interface. With MultiNetworkPolicy, you can control the traffic that a pod can receive over the secondary interfaces. If unset, this property defaults to 'false' and MultiNetworkPolicy objects are ignored. If 'disableMultiNetwork' is 'true' then the value of this field is ignored. + type: boolean + status: + description: NetworkStatus is detailed operator status, which is distilled up to the Network clusteroperator object. + type: object + properties: + conditions: + description: conditions is a list of conditions and their status + type: array + items: + description: OperatorCondition is just the standard condition fields. + type: object + properties: + lastTransitionTime: + type: string + format: date-time + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + generations: + description: generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. + type: array + items: + description: GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. 
+ type: object + properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload controller involved + type: integer + format: int64 + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're tracking + type: string + observedGeneration: + description: observedGeneration is the last generation change you've dealt with + type: integer + format: int64 + readyReplicas: + description: readyReplicas indicates how many replicas are ready and at the desired state + type: integer + format: int32 + version: + description: version is the level this availability applies to + type: string + served: true + storage: true diff --git a/vendor/github.com/openshift/api/operator/v1/0000_70_cluster-network-operator_01-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_70_cluster-network-operator_01-TechPreviewNoUpgrade.crd.yaml new file mode 100644 index 0000000000..294b5c945f --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_70_cluster-network-operator_01-TechPreviewNoUpgrade.crd.yaml @@ -0,0 +1,551 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/475 + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + release.openshift.io/feature-set: TechPreviewNoUpgrade + name: networks.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: Network + listKind: NetworkList + plural: networks + singular: network + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "Network describes the cluster's desired network configuration. It is consumed by the cluster-network-operator. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: NetworkSpec is the top-level network configuration object. + type: object + properties: + additionalNetworks: + description: additionalNetworks is a list of extra networks to make available to pods when multiple networks are enabled. + type: array + items: + description: AdditionalNetworkDefinition configures an extra network that is available but not created by default. Instead, pods must request them by name. 
type must be specified, along with exactly one "Config" that matches the type. + type: object + properties: + name: + description: name is the name of the network. This will be populated in the resulting CRD This must be unique. + type: string + namespace: + description: namespace is the namespace of the network. This will be populated in the resulting CRD If not given the network will be created in the default namespace. + type: string + rawCNIConfig: + description: rawCNIConfig is the raw CNI configuration json to create in the NetworkAttachmentDefinition CRD + type: string + simpleMacvlanConfig: + description: SimpleMacvlanConfig configures the macvlan interface in case of type:NetworkTypeSimpleMacvlan + type: object + properties: + ipamConfig: + description: IPAMConfig configures IPAM module will be used for IP Address Management (IPAM). + type: object + properties: + staticIPAMConfig: + description: StaticIPAMConfig configures the static IP address in case of type:IPAMTypeStatic + type: object + properties: + addresses: + description: Addresses configures IP address for the interface + type: array + items: + description: StaticIPAMAddresses provides IP address and Gateway for static IPAM addresses + type: object + properties: + address: + description: Address is the IP address in CIDR format + type: string + gateway: + description: Gateway is IP inside of subnet to designate as the gateway + type: string + dns: + description: DNS configures DNS for the interface + type: object + properties: + domain: + description: Domain configures the domainname the local domain used for short hostname lookups + type: string + nameservers: + description: Nameservers points DNS servers for IP lookup + type: array + items: + type: string + search: + description: Search configures priority ordered search domains for short hostname lookups + type: array + items: + type: string + routes: + description: Routes configures IP routes for the interface + type: array + items: + description: StaticIPAMRoutes provides Destination/Gateway pairs for static IPAM routes + type: object + properties: + destination: + description: Destination points the IP route destination + type: string + gateway: + description: Gateway is the route's next-hop IP address If unset, a default gateway is assumed (as determined by the CNI plugin). + type: string + type: + description: Type is the type of IPAM module will be used for IP Address Management(IPAM). The supported values are IPAMTypeDHCP, IPAMTypeStatic + type: string + master: + description: master is the host interface to create the macvlan interface from. If not specified, it will be default route interface + type: string + mode: + description: 'mode is the macvlan mode: bridge, private, vepa, passthru. The default is bridge' + type: string + mtu: + description: mtu is the mtu to use for the macvlan interface. if unset, host's kernel will select the value. + type: integer + format: int32 + minimum: 0 + type: + description: type is the type of network The supported values are NetworkTypeRaw, NetworkTypeSimpleMacvlan + type: string + clusterNetwork: + description: clusterNetwork is the IP address pool to use for pod IPs. Some network providers, e.g. OpenShift SDN, support multiple ClusterNetworks. Others only support one. This is equivalent to the cluster-cidr. + type: array + items: + description: ClusterNetworkEntry is a subnet from which to allocate PodIPs. A network of size HostPrefix (in CIDR notation) will be allocated when nodes join the cluster. 
If the HostPrefix field is not used by the plugin, it can be left unset. Not all network providers support multiple ClusterNetworks + type: object + properties: + cidr: + type: string + hostPrefix: + type: integer + format: int32 + minimum: 0 + defaultNetwork: + description: defaultNetwork is the "default" network that all pods will receive + type: object + properties: + openshiftSDNConfig: + description: openShiftSDNConfig configures the openshift-sdn plugin + type: object + properties: + enableUnidling: + description: enableUnidling controls whether or not the service proxy will support idling and unidling of services. By default, unidling is enabled. + type: boolean + mode: + description: mode is one of "Multitenant", "Subnet", or "NetworkPolicy" + type: string + mtu: + description: mtu is the mtu to use for the tunnel interface. Defaults to 1450 if unset. This must be 50 bytes smaller than the machine's uplink. + type: integer + format: int32 + minimum: 0 + useExternalOpenvswitch: + description: 'useExternalOpenvswitch used to control whether the operator would deploy an OVS DaemonSet itself or expect someone else to start OVS. As of 4.6, OVS is always run as a system service, and this flag is ignored. DEPRECATED: non-functional as of 4.6' + type: boolean + vxlanPort: + description: vxlanPort is the port to use for all vxlan packets. The default is 4789. + type: integer + format: int32 + minimum: 0 + ovnKubernetesConfig: + description: ovnKubernetesConfig configures the ovn-kubernetes plugin. + type: object + properties: + egressIPConfig: + description: egressIPConfig holds the configuration for EgressIP options. + type: object + properties: + reachabilityTotalTimeoutSeconds: + description: reachabilityTotalTimeout configures the EgressIP node reachability check total timeout in seconds. If the EgressIP node cannot be reached within this timeout, the node is declared down. Setting a large value may cause the EgressIP feature to react slowly to node changes. In particular, it may react slowly for EgressIP nodes that really have a genuine problem and are unreachable. When omitted, this means the user has no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is 1 second. A value of 0 disables the EgressIP node's reachability check. + type: integer + format: int32 + maximum: 60 + minimum: 0 + gatewayConfig: + description: gatewayConfig holds the configuration for node gateway options. + type: object + properties: + ipForwarding: + description: IPForwarding controls IP forwarding for all traffic on OVN-Kubernetes managed interfaces (such as br-ex). By default this is set to Restricted, and Kubernetes related traffic is still forwarded appropriately, but other IP traffic will not be routed by the OCP node. If there is a desire to allow the host to forward traffic across OVN-Kubernetes managed interfaces, then set this field to "Global". The supported values are "Restricted" and "Global". + type: string + ipv4: + description: ipv4 allows users to configure IP settings for IPv4 connections. When omitted, this means no opinion and the default configuration is used. Check individual members fields within ipv4 for details of default values. + type: object + properties: + internalMasqueradeSubnet: + description: internalMasqueradeSubnet contains the masquerade addresses in IPV4 CIDR format used internally by ovn-kubernetes to enable host to service traffic. 
Each host in the cluster is configured with these addresses, as well as the shared gateway bridge interface. The values can be changed after installation. The subnet chosen should not overlap with other networks specified for OVN-Kubernetes as well as other networks used on the host. Additionally the subnet must be large enough to accommodate 6 IPs (maximum prefix length /29). When omitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time. The current default subnet is 169.254.169.0/29 The value must be in proper IPV4 CIDR format + type: string + maxLength: 18 + x-kubernetes-validations: + - rule: self.indexOf('/') == self.lastIndexOf('/') + message: CIDR format must contain exactly one '/' + - rule: '[int(self.split(''/'')[1])].all(x, x <= 29 && x >= 0)' + message: subnet must be in the range /0 to /29 inclusive + - rule: self.split('/')[0].split('.').size() == 4 + message: a valid IPv4 address must contain 4 octets + - rule: '[self.findAll(''[0-9]+'')[0]].all(x, x != ''0'' && int(x) <= 255 && !x.startsWith(''0''))' + message: first IP address octet must not contain leading zeros, must be greater than 0 and less or equal to 255 + - rule: '[self.findAll(''[0-9]+'')[1], self.findAll(''[0-9]+'')[2], self.findAll(''[0-9]+'')[3]].all(x, int(x) <= 255 && (x == ''0'' || !x.startsWith(''0'')))' + message: IP address octets must not contain leading zeros, and must be less or equal to 255 + ipv6: + description: ipv6 allows users to configure IP settings for IPv6 connections. When omitted, this means no opinion and the default configuration is used. Check individual members fields within ipv6 for details of default values. + type: object + properties: + internalMasqueradeSubnet: + description: internalMasqueradeSubnet contains the masquerade addresses in IPV6 CIDR format used internally by ovn-kubernetes to enable host to service traffic. Each host in the cluster is configured with these addresses, as well as the shared gateway bridge interface. The values can be changed after installation. The subnet chosen should not overlap with other networks specified for OVN-Kubernetes as well as other networks used on the host. Additionally the subnet must be large enough to accommodate 6 IPs (maximum prefix length /125). When omitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time. The current default subnet is fd69::/125 Note that IPV6 dual addresses are not permitted + type: string + x-kubernetes-validations: + - rule: self.indexOf('/') == self.lastIndexOf('/') + message: CIDR format must contain exactly one '/' + - rule: self.split('/').size() == 2 && [int(self.split('/')[1])].all(x, x <= 125 && x >= 0) + message: subnet must be in the range /0 to /125 inclusive + - rule: self.indexOf('::') == self.lastIndexOf('::') + message: IPv6 addresses must contain at most one '::' and may only be shortened once + - rule: 'self.contains(''::'') ? self.split(''/'')[0].split('':'').size() <= 8 : self.split(''/'')[0].split('':'').size() == 8' + message: a valid IPv6 address must contain 8 segments unless elided (::), in which case it must contain at most 6 non-empty segments + - rule: 'self.split(''/'')[0].split('':'').size() >=1 ? 
[self.split(''/'')[0].split('':'', 8)[0]].all(x, x == '''' || (x.matches(''^[0-9A-Fa-f]{1,4}$'')) && size(x)<5 ) : true' + message: each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 1 + - rule: 'self.split(''/'')[0].split('':'').size() >=2 ? [self.split(''/'')[0].split('':'', 8)[1]].all(x, x == '''' || (x.matches(''^[0-9A-Fa-f]{1,4}$'')) && size(x)<5 ) : true' + message: each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 2 + - rule: 'self.split(''/'')[0].split('':'').size() >=3 ? [self.split(''/'')[0].split('':'', 8)[2]].all(x, x == '''' || (x.matches(''^[0-9A-Fa-f]{1,4}$'')) && size(x)<5 ) : true' + message: each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 3 + - rule: 'self.split(''/'')[0].split('':'').size() >=4 ? [self.split(''/'')[0].split('':'', 8)[3]].all(x, x == '''' || (x.matches(''^[0-9A-Fa-f]{1,4}$'')) && size(x)<5 ) : true' + message: each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 4 + - rule: 'self.split(''/'')[0].split('':'').size() >=5 ? [self.split(''/'')[0].split('':'', 8)[4]].all(x, x == '''' || (x.matches(''^[0-9A-Fa-f]{1,4}$'')) && size(x)<5 ) : true' + message: each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 5 + - rule: 'self.split(''/'')[0].split('':'').size() >=6 ? [self.split(''/'')[0].split('':'', 8)[5]].all(x, x == '''' || (x.matches(''^[0-9A-Fa-f]{1,4}$'')) && size(x)<5 ) : true' + message: each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 6 + - rule: 'self.split(''/'')[0].split('':'').size() >=7 ? [self.split(''/'')[0].split('':'', 8)[6]].all(x, x == '''' || (x.matches(''^[0-9A-Fa-f]{1,4}$'')) && size(x)<5 ) : true' + message: each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 7 + - rule: 'self.split(''/'')[0].split('':'').size() >=8 ? [self.split(''/'')[0].split('':'', 8)[7]].all(x, x == '''' || (x.matches(''^[0-9A-Fa-f]{1,4}$'')) && size(x)<5 ) : true' + message: each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 8 + - rule: '!self.contains(''.'')' + message: IPv6 dual addresses are not permitted, value should not contain `.` characters + routingViaHost: + description: RoutingViaHost allows pod egress traffic to exit via the ovn-k8s-mp0 management port into the host before sending it out. If this is not set, traffic will always egress directly from OVN to outside without touching the host stack. Setting this to true means hardware offload will not be supported. Default is false if GatewayConfig is specified. + type: boolean + default: false + genevePort: + description: geneve port is the UDP port to be used by geneve encapulation. Default is 6081 + type: integer + format: int32 + minimum: 1 + hybridOverlayConfig: + description: HybridOverlayConfig configures an additional overlay network for peers that are not using OVN. + type: object + properties: + hybridClusterNetwork: + description: HybridClusterNetwork defines a network space given to nodes on an additional overlay network. + type: array + items: + description: ClusterNetworkEntry is a subnet from which to allocate PodIPs. A network of size HostPrefix (in CIDR notation) will be allocated when nodes join the cluster. If the HostPrefix field is not used by the plugin, it can be left unset. 
Not all network providers support multiple ClusterNetworks + type: object + properties: + cidr: + type: string + hostPrefix: + type: integer + format: int32 + minimum: 0 + hybridOverlayVXLANPort: + description: HybridOverlayVXLANPort defines the VXLAN port number to be used by the additional overlay network. Default is 4789 + type: integer + format: int32 + ipsecConfig: + description: ipsecConfig enables and configures IPsec for pods on the pod network within the cluster. + type: object + mtu: + description: mtu is the MTU to use for the tunnel interface. This must be 100 bytes smaller than the uplink mtu. Default is 1400 + type: integer + format: int32 + minimum: 0 + policyAuditConfig: + description: policyAuditConfig is the configuration for network policy audit events. If unset, reported defaults are used. + type: object + properties: + destination: + description: 'destination is the location for policy log messages. Regardless of this config, persistent logs will always be dumped to the host at /var/log/ovn/ however Additionally syslog output may be configured as follows. Valid values are: - "libc" -> to use the libc syslog() function of the host node''s journdald process - "udp:host:port" -> for sending syslog over UDP - "unix:file" -> for using the UNIX domain socket directly - "null" -> to discard all messages logged to syslog The default is "null"' + type: string + default: "null" + maxFileSize: + description: maxFilesSize is the max size an ACL_audit log file is allowed to reach before rotation occurs Units are in MB and the Default is 50MB + type: integer + format: int32 + default: 50 + minimum: 1 + maxLogFiles: + description: maxLogFiles specifies the maximum number of ACL_audit log files that can be present. + type: integer + format: int32 + default: 5 + minimum: 1 + rateLimit: + description: rateLimit is the approximate maximum number of messages to generate per-second per-node. If unset the default of 20 msg/sec is used. + type: integer + format: int32 + default: 20 + minimum: 1 + syslogFacility: + description: syslogFacility the RFC5424 facility for generated messages, e.g. "kern". Default is "local0" + type: string + default: local0 + v4InternalSubnet: + description: v4InternalSubnet is a v4 subnet used internally by ovn-kubernetes in case the default one is being already used by something else. It must not overlap with any other subnet being used by OpenShift or by the node network. The size of the subnet must be larger than the number of nodes. The value cannot be changed after installation. Default is 100.64.0.0/16 + type: string + v6InternalSubnet: + description: v6InternalSubnet is a v6 subnet used internally by ovn-kubernetes in case the default one is being already used by something else. It must not overlap with any other subnet being used by OpenShift or by the node network. The size of the subnet must be larger than the number of nodes. The value cannot be changed after installation. Default is fd98::/48 + type: string + type: + description: type is the type of network All NetworkTypes are supported except for NetworkTypeRaw + type: string + deployKubeProxy: + description: deployKubeProxy specifies whether or not a standalone kube-proxy should be deployed by the operator. Some network providers include kube-proxy or similar functionality. If unset, the plugin will attempt to select the correct value, which is false when OpenShift SDN and ovn-kubernetes are used and true otherwise. 
+ type: boolean + disableMultiNetwork: + description: disableMultiNetwork specifies whether or not multiple pod network support should be disabled. If unset, this property defaults to 'false' and multiple network support is enabled. + type: boolean + disableNetworkDiagnostics: + description: disableNetworkDiagnostics specifies whether or not PodNetworkConnectivityCheck CRs from a test pod to every node, apiserver and LB should be disabled or not. If unset, this property defaults to 'false' and network diagnostics is enabled. Setting this to 'true' would reduce the additional load of the pods performing the checks. + type: boolean + default: false + exportNetworkFlows: + description: exportNetworkFlows enables and configures the export of network flow metadata from the pod network by using protocols NetFlow, SFlow or IPFIX. Currently only supported on OVN-Kubernetes plugin. If unset, flows will not be exported to any collector. + type: object + properties: + ipfix: + description: ipfix defines IPFIX configuration. + type: object + properties: + collectors: + description: ipfixCollectors is list of strings formatted as ip:port with a maximum of ten items + type: array + maxItems: 10 + minItems: 1 + items: + type: string + pattern: ^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5]):([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$ + netFlow: + description: netFlow defines the NetFlow configuration. + type: object + properties: + collectors: + description: netFlow defines the NetFlow collectors that will consume the flow data exported from OVS. It is a list of strings formatted as ip:port with a maximum of ten items + type: array + maxItems: 10 + minItems: 1 + items: + type: string + pattern: ^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5]):([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$ + sFlow: + description: sFlow defines the SFlow configuration. + type: object + properties: + collectors: + description: sFlowCollectors is list of strings formatted as ip:port with a maximum of ten items + type: array + maxItems: 10 + minItems: 1 + items: + type: string + pattern: ^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5]):([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$ + kubeProxyConfig: + description: kubeProxyConfig lets us configure desired proxy configuration. If not specified, sensible defaults will be chosen by OpenShift directly. Not consumed by all network providers - currently only openshift-sdn. + type: object + properties: + bindAddress: + description: The address to "bind" on Defaults to 0.0.0.0 + type: string + iptablesSyncPeriod: + description: 'An internal kube-proxy parameter. In older releases of OCP, this sometimes needed to be adjusted in large clusters for performance reasons, but this is no longer necessary, and there is no reason to change this from the default value. Default: 30s' + type: string + proxyArguments: + description: Any additional arguments to pass to the kubeproxy process + type: object + additionalProperties: + description: ProxyArgumentList is a list of arguments to pass to the kubeproxy process + type: array + items: + type: string + logLevel: + description: "logLevel is an intent based logging for an overall component. 
It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + type: string + default: Normal + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + managementState: + description: managementState indicates whether and how the operator should manage the component + type: string + pattern: ^(Managed|Unmanaged|Force|Removed)$ + migration: + description: migration enables and configures the cluster network migration. The migration procedure allows to change the network type and the MTU. + type: object + properties: + features: + description: features contains the features migration configuration. Set this to migrate feature configuration when changing the cluster default network provider. if unset, the default operation is to migrate all the configuration of supported features. + type: object + properties: + egressFirewall: + description: egressFirewall specifies whether or not the Egress Firewall configuration is migrated automatically when changing the cluster default network provider. If unset, this property defaults to 'true' and Egress Firewall configure is migrated. + type: boolean + default: true + egressIP: + description: egressIP specifies whether or not the Egress IP configuration is migrated automatically when changing the cluster default network provider. If unset, this property defaults to 'true' and Egress IP configure is migrated. + type: boolean + default: true + multicast: + description: multicast specifies whether or not the multicast configuration is migrated automatically when changing the cluster default network provider. If unset, this property defaults to 'true' and multicast configure is migrated. + type: boolean + default: true + mode: + description: mode indicates the mode of network migration. The supported values are "Live", "Offline" and omitted. A "Live" migration operation will not cause service interruption by migrating the CNI of each node one by one. The cluster network will work as normal during the network migration. An "Offline" migration operation will cause service interruption. During an "Offline" migration, two rounds of node reboots are required. The cluster network will be malfunctioning during the network migration. When omitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time. The current default value is "Offline". + type: string + enum: + - Live + - Offline + - "" + mtu: + description: mtu contains the MTU migration configuration. Set this to allow changing the MTU values for the default network. If unset, the operation of changing the MTU for the default network will be rejected. + type: object + properties: + machine: + description: machine contains MTU migration configuration for the machine's uplink. Needs to be migrated along with the default network MTU unless the current uplink MTU already accommodates the default network MTU. + type: object + properties: + from: + description: from is the MTU to migrate from. + type: integer + format: int32 + minimum: 0 + to: + description: to is the MTU to migrate to. + type: integer + format: int32 + minimum: 0 + network: + description: network contains information about MTU migration for the default network. Migrations are only allowed to MTU values lower than the machine's uplink MTU by the minimum appropriate offset. 
+ type: object + properties: + from: + description: from is the MTU to migrate from. + type: integer + format: int32 + minimum: 0 + to: + description: to is the MTU to migrate to. + type: integer + format: int32 + minimum: 0 + networkType: + description: networkType is the target type of network migration. Set this to the target network type to allow changing the default network. If unset, the operation of changing cluster default network plugin will be rejected. The supported values are OpenShiftSDN, OVNKubernetes + type: string + x-kubernetes-validations: + - rule: '!has(self.mtu) || !has(self.networkType) || self.networkType == '''' || has(self.mode) && self.mode == ''Live''' + message: networkType migration in mode other than 'Live' may not be configured at the same time as mtu migration + observedConfig: + description: observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + description: "operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + type: string + default: Normal + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + serviceNetwork: + description: serviceNetwork is the ip address pool to use for Service IPs Currently, all existing network providers only support a single value here, but this is an array to allow for growth. + type: array + items: + type: string + unsupportedConfigOverrides: + description: unsupportedConfigOverrides overrides the final configuration that was computed by the operator. Red Hat does not support the use of this field. Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from the Red Hat support before using this field. Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + useMultiNetworkPolicy: + description: useMultiNetworkPolicy enables a controller which allows for MultiNetworkPolicy objects to be used on additional networks as created by Multus CNI. MultiNetworkPolicy are similar to NetworkPolicy objects, but NetworkPolicy objects only apply to the primary interface. With MultiNetworkPolicy, you can control the traffic that a pod can receive over the secondary interfaces. If unset, this property defaults to 'false' and MultiNetworkPolicy objects are ignored. If 'disableMultiNetwork' is 'true' then the value of this field is ignored. + type: boolean + status: + description: NetworkStatus is detailed operator status, which is distilled up to the Network clusteroperator object. + type: object + properties: + conditions: + description: conditions is a list of conditions and their status + type: array + items: + description: OperatorCondition is just the standard condition fields. + type: object + properties: + lastTransitionTime: + type: string + format: date-time + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + generations: + description: generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. 
+ type: array + items: + description: GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. + type: object + properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload controller involved + type: integer + format: int64 + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're tracking + type: string + observedGeneration: + description: observedGeneration is the last generation change you've dealt with + type: integer + format: int64 + readyReplicas: + description: readyReplicas indicates how many replicas are ready and at the desired state + type: integer + format: int32 + version: + description: version is the level this availability applies to + type: string + served: true + storage: true diff --git a/vendor/github.com/openshift/api/operator/v1/0000_90_cluster_csi_driver_01_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_90_cluster_csi_driver_01_config.crd.yaml index d90a135578..93e34a5dfc 100644 --- a/vendor/github.com/openshift/api/operator/v1/0000_90_cluster_csi_driver_01_config.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/0000_90_cluster_csi_driver_01_config.crd.yaml @@ -90,12 +90,13 @@ spec: type: object type: object driverType: - description: 'driverType indicates type of CSI driver for which the driverConfig is being applied to. Valid values are: AWS, Azure, GCP, vSphere and omitted. Consumers should treat unknown values as a NO-OP.' + description: 'driverType indicates type of CSI driver for which the driverConfig is being applied to. Valid values are: AWS, Azure, GCP, IBMCloud, vSphere and omitted. Consumers should treat unknown values as a NO-OP.' enum: - "" - AWS - Azure - GCP + - IBMCloud - vSphere type: string gcp: @@ -132,6 +133,18 @@ spec: - projectID type: object type: object + ibmcloud: + description: ibmcloud is used to configure the IBM Cloud CSI driver. + properties: + encryptionKeyCRN: + description: encryptionKeyCRN is the IBM Cloud CRN of the customer-managed root key to use for disk encryption of volumes for the default storage classes. + maxLength: 154 + minLength: 144 + pattern: ^crn:v[0-9]+:bluemix:(public|private):(kms|hs-crypto):[a-z-]+:a/[0-9a-f]+:[0-9a-f-]{36}:key:[0-9a-f-]{36}$ + type: string + required: + - encryptionKeyCRN + type: object vSphere: description: vsphere is used to configure the vsphere CSI driver. properties: @@ -144,6 +157,9 @@ spec: required: - driverType type: object + x-kubernetes-validations: + - message: ibmcloud must be set if driverType is 'IBMCloud', but remain unset otherwise + rule: 'has(self.driverType) && self.driverType == ''IBMCloud'' ? has(self.ibmcloud) : !has(self.ibmcloud)' logLevel: default: Normal description: "logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." 
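To tie the pieces together before the new testsuites: the ClusterCSIDriver schema change above combines three constraints: driverType gains the IBMCloud enum value, the new ibmcloud.encryptionKeyCRN field is required within the ibmcloud object, length-bounded (144-154 characters) and pattern-checked, and the object-level CEL rule requires ibmcloud to be present exactly when driverType is IBMCloud. A minimal ClusterCSIDriver that should pass these checks is sketched here; the metadata name and the CRN's region, account, instance and key identifiers are hypothetical placeholders, not values taken from this change:

    apiVersion: operator.openshift.io/v1
    kind: ClusterCSIDriver
    metadata:
      name: vpc.block.csi.ibm.io        # hypothetical driver instance name
    spec:
      driverConfig:
        driverType: IBMCloud
        ibmcloud:
          # hypothetical 147-character CRN; must match
          # ^crn:v[0-9]+:bluemix:(public|private):(kms|hs-crypto):[a-z-]+:a/[0-9a-f]+:[0-9a-f-]{36}:key:[0-9a-f-]{36}$
          encryptionKeyCRN: crn:v1:bluemix:public:kms:us-south:a/0123456789abcdef0123456789abcdef:01234567-89ab-cdef-0123-456789abcdef:key:01234567-89ab-cdef-0123-456789abcdef

Setting driverType: IBMCloud without the ibmcloud block (or adding ibmcloud under any other driverType) trips the rule "ibmcloud must be set if driverType is 'IBMCloud', but remain unset otherwise", and an empty ibmcloud object fails on the required encryptionKeyCRN, as the clustercsidriver testsuite cases added below exercise.

Likewise, the migration.mode field added to the Network types is feature-gated: it is only rendered into the CustomNoUpgrade and TechPreviewNoUpgrade CRD variants, and the FeatureSetAware validation rule only permits combining an mtu migration with a networkType migration when mode is Live. A spec that satisfies that rule, mirroring the testsuite cases that follow, would be:

    apiVersion: operator.openshift.io/v1
    kind: Network
    spec:
      migration:
        networkType: OVNKubernetes
        mode: Live          # required by the CEL rule whenever mtu migration is set alongside networkType
        mtu:
          network:
            from: 1450      # MTU to migrate from
            to: 1400        # MTU to migrate to

On the Default (stable) CRD variant the mode field is not part of the schema and gets pruned; the "Should not be able to create migration mode" case in stable.network.testsuite.yaml below shows migration with mode: Live being accepted but stored as an empty migration mapping.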
diff --git a/vendor/github.com/openshift/api/operator/v1/custom.network.testsuite.yaml b/vendor/github.com/openshift/api/operator/v1/custom.network.testsuite.yaml new file mode 100644 index 0000000000..ab12cfdd4e --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/custom.network.testsuite.yaml @@ -0,0 +1,100 @@ +apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: "[CustomNoUpgrade] Network" +crd: 0000_70_cluster-network-operator_01-CustomNoUpgrade.crd.yaml +tests: + onCreate: + - name: Should be able to create migration mode + initial: | + apiVersion: operator.openshift.io/v1 + kind: Network + spec: + migration: + mode: Live + expected: | + apiVersion: operator.openshift.io/v1 + kind: Network + spec: + disableNetworkDiagnostics: false + logLevel: Normal + operatorLogLevel: Normal + migration: + mode: Live + - name: Should be able to create mtu migration without setting the migration mode + initial: | + apiVersion: operator.openshift.io/v1 + kind: Network + spec: + migration: + mtu: + network: + from: 1450 + to: 1400 + expected: | + apiVersion: operator.openshift.io/v1 + kind: Network + spec: + disableNetworkDiagnostics: false + logLevel: Normal + operatorLogLevel: Normal + migration: + mtu: + network: + from: 1450 + to: 1400 + - name: Should be able to create networkType migration in in offline migration mode + initial: | + apiVersion: operator.openshift.io/v1 + kind: Network + spec: + migration: + networkType: OVNKubernetes + mode: Offline + expected: | + apiVersion: operator.openshift.io/v1 + kind: Network + spec: + disableNetworkDiagnostics: false + logLevel: Normal + operatorLogLevel: Normal + migration: + networkType: OVNKubernetes + mode: Offline + - name: Should throw an error when mtu and networkType migration is created in offline migration mode + initial: | + apiVersion: operator.openshift.io/v1 + kind: Network + spec: + migration: + networkType: OVNKubernetes + mtu: + network: + from: 1450 + to: 1400 + mode: Offline + expectedError: "networkType migration in mode other than 'Live' may not be configured at the same time as mtu migration" + - name: Should be able to create mtu and networkType migration in live migration mode + initial: | + apiVersion: operator.openshift.io/v1 + kind: Network + spec: + migration: + networkType: OVNKubernetes + mtu: + network: + from: 1450 + to: 1400 + mode: Live + expected: | + apiVersion: operator.openshift.io/v1 + kind: Network + spec: + disableNetworkDiagnostics: false + logLevel: Normal + operatorLogLevel: Normal + migration: + networkType: OVNKubernetes + mtu: + network: + from: 1450 + to: 1400 + mode: Live diff --git a/vendor/github.com/openshift/api/operator/v1/stable.clustercsidriver.testsuite.yaml b/vendor/github.com/openshift/api/operator/v1/stable.clustercsidriver.testsuite.yaml index dd43c63e1a..7a159da860 100644 --- a/vendor/github.com/openshift/api/operator/v1/stable.clustercsidriver.testsuite.yaml +++ b/vendor/github.com/openshift/api/operator/v1/stable.clustercsidriver.testsuite.yaml @@ -18,3 +18,24 @@ tests: spec: logLevel: Normal operatorLogLevel: Normal + - name: IBM Cloud CSIDriverType must have a defined IBM Cloud spec + initial: | + apiVersion: operator.openshift.io/v1 + kind: ClusterCSIDriver + metadata: + name: csi.sharedresource.openshift.io + spec: + driverConfig: + driverType: IBMCloud + expectedError: "Invalid value: \"object\": ibmcloud must be set if driverType is 'IBMCloud', but remain unset otherwise" + - name: IBM Cloud spec must have an 
EncryptionKeyCRN defined + initial: | + apiVersion: operator.openshift.io/v1 + kind: ClusterCSIDriver + metadata: + name: csi.sharedresource.openshift.io + spec: + driverConfig: + driverType: IBMCloud + ibmcloud: {} + expectedError: "spec.driverConfig.ibmcloud.encryptionKeyCRN: Required value, : Invalid value: \"null\": some validation rules were not checked because the object was invalid; correct the existing errors to complete validation" diff --git a/vendor/github.com/openshift/api/operator/v1/stable.ingresscontroller.testsuite.yaml b/vendor/github.com/openshift/api/operator/v1/stable.ingresscontroller.testsuite.yaml index 6a816ee728..903d8e60c5 100644 --- a/vendor/github.com/openshift/api/operator/v1/stable.ingresscontroller.testsuite.yaml +++ b/vendor/github.com/openshift/api/operator/v1/stable.ingresscontroller.testsuite.yaml @@ -476,305 +476,3 @@ tests: set: value: DENY expectedError: 'IngressController.operator.openshift.io "default-not-allowed-values" is invalid: [spec.httpHeaders.actions.response[0].action.type: Required value, : Invalid value: "null": some validation rules were not checked because the object was invalid; correct the existing errors to complete validation]' - - - name: "Should require spec.logging.access.destination if spec.logging.access is not null" - initial: | - apiVersion: operator.openshift.io/v1 - kind: IngressController - metadata: - name: access-logging-null-destination - namespace: openshift-ingress-operator - spec: - logging: - access: {} - expectedError: "spec.logging.access.destination: Required value" - - name: "Should require spec.logging.access.destination.type if spec.logging.access.destination is not null" - initial: | - apiVersion: operator.openshift.io/v1 - kind: IngressController - metadata: - name: access-logging-empty-destination-type - namespace: openshift-ingress-operator - spec: - logging: - access: - destination: - container: {} - expectedError: "spec.logging.access.destination.type: Required value" - - name: "Should be able to specify empty spec.logging.access.destination.container with type: Container" - initial: | - apiVersion: operator.openshift.io/v1 - kind: IngressController - metadata: - name: container-logging-empty-settings - namespace: openshift-ingress-operator - spec: - logging: - access: - destination: - type: Container - container: {} - expected: | - apiVersion: operator.openshift.io/v1 - kind: IngressController - metadata: - name: container-logging-empty-settings - namespace: openshift-ingress-operator - spec: - httpEmptyRequestsPolicy: Respond - logging: - access: - destination: - type: Container - container: - maxLength: 1024 - logEmptyRequests: Log - - name: "Should be able to omit spec.logging.access.destination.container with type: Container" - initial: | - apiVersion: operator.openshift.io/v1 - kind: IngressController - metadata: - name: container-logging-null-settings - namespace: openshift-ingress-operator - spec: - logging: - access: - destination: - type: Container - expected: | - apiVersion: operator.openshift.io/v1 - kind: IngressController - metadata: - name: container-logging-null-settings - namespace: openshift-ingress-operator - spec: - httpEmptyRequestsPolicy: Respond - logging: - access: - destination: - type: Container - logEmptyRequests: Log - - name: "Should be able to specify spec.logging.access.destination.syslog with type: Syslog" - initial: | - apiVersion: operator.openshift.io/v1 - kind: IngressController - metadata: - name: syslog - namespace: openshift-ingress-operator - spec: - logging: - 
access: - destination: - type: Syslog - syslog: - address: 1.2.3.4 - port: 514 - expected: | - apiVersion: operator.openshift.io/v1 - kind: IngressController - metadata: - name: syslog - namespace: openshift-ingress-operator - spec: - httpEmptyRequestsPolicy: Respond - logging: - access: - destination: - type: Syslog - syslog: - address: 1.2.3.4 - maxLength: 1024 - port: 514 - logEmptyRequests: Log - - name: "Should not be able to specify empty spec.logging.access.destination.syslog with type: Syslog" - initial: | - apiVersion: operator.openshift.io/v1 - kind: IngressController - metadata: - name: syslog-empty-settings - namespace: openshift-ingress-operator - spec: - logging: - access: - destination: - type: Syslog - syslog: {} - expectedError: "address: Required value" - # OpenShift 4.5 added the access logging API without validation to prevent - # null spec.logging.access.destination.syslog with type: Syslog, so validation - # must forevermore allow this combination. (The ingress operator doesn't - # actually enable syslog logging if the syslog field is null.) - - name: "Should be able to omit spec.logging.access.destination.syslog with type: Syslog" - initial: | - apiVersion: operator.openshift.io/v1 - kind: IngressController - metadata: - name: syslog-null-settings - namespace: openshift-ingress-operator - spec: - logging: - access: - destination: - type: Syslog - expected: | - apiVersion: operator.openshift.io/v1 - kind: IngressController - metadata: - name: syslog-null-settings - namespace: openshift-ingress-operator - spec: - httpEmptyRequestsPolicy: Respond - logging: - access: - destination: - type: Syslog - logEmptyRequests: Log - # OpenShift 4.5 added the access logging API without validation to prevent - # specifying spec.logging.access.destination.syslog with type: Container, so - # validation must forevermore allow this combination. (The ingress operator - # enables container logging and ignores the syslog settings in this case.) 
- - name: "Should be able to specify spec.logging.access.destination.syslog with type: Container" - initial: | - apiVersion: operator.openshift.io/v1 - kind: IngressController - metadata: - name: container-logging-with-bogus-syslog-configuration - namespace: openshift-ingress-operator - spec: - logging: - access: - destination: - type: Container - container: {} - syslog: - address: 1.2.3.4 - maxLength: 1024 - port: 514 - expected: | - apiVersion: operator.openshift.io/v1 - kind: IngressController - metadata: - name: container-logging-with-bogus-syslog-configuration - namespace: openshift-ingress-operator - spec: - httpEmptyRequestsPolicy: Respond - logging: - access: - destination: - type: Container - container: - maxLength: 1024 - syslog: - address: 1.2.3.4 - maxLength: 1024 - port: 514 - logEmptyRequests: Log - - name: "Should be able to specify spec.logging.access.destination.container with type: Syslog" - initial: | - apiVersion: operator.openshift.io/v1 - kind: IngressController - metadata: - name: syslog-logging-with-bogus-container-configuration - namespace: openshift-ingress-operator - spec: - logging: - access: - destination: - type: Syslog - container: {} - syslog: - address: 1.2.3.4 - maxLength: 1024 - port: 514 - expected: | - apiVersion: operator.openshift.io/v1 - kind: IngressController - metadata: - name: syslog-logging-with-bogus-container-configuration - namespace: openshift-ingress-operator - spec: - httpEmptyRequestsPolicy: Respond - logging: - access: - destination: - type: Syslog - container: - maxLength: 1024 - syslog: - address: 1.2.3.4 - maxLength: 1024 - port: 514 - logEmptyRequests: Log - # OpenShift 4.14 added the spec.logging.access.destination.container.maxLength - # field. As noted above, we must allow - # spec.logging.access.destination.container when type: Syslog is specified. - # Also, due to defaulting, we must allow - # spec.logging.access.destination.container.maxLength with the default value - # because the default value gets set automatically when - # spec.logging.access.destination.container is not null. However, we do not - # need to allow spec.logging.access.destination.container.maxLength with a - # non-default value when type: Syslog is specified. 
- - name: "Should be able to specify the default value for spec.logging.access.destination.container.maxLength with type: Syslog" - initial: | - apiVersion: operator.openshift.io/v1 - kind: IngressController - metadata: - name: syslog-logging-with-default-container-maxlength - namespace: openshift-ingress-operator - spec: - logging: - access: - destination: - type: Syslog - container: - maxLength: 1024 - expected: | - apiVersion: operator.openshift.io/v1 - kind: IngressController - metadata: - name: syslog-logging-with-default-container-maxlength - namespace: openshift-ingress-operator - spec: - httpEmptyRequestsPolicy: Respond - logging: - access: - destination: - type: Syslog - container: - maxLength: 1024 - logEmptyRequests: Log - - name: "Should not be able to specify a non-default value for spec.logging.access.destination.container.maxLength with type: Syslog" - initial: | - apiVersion: operator.openshift.io/v1 - kind: IngressController - metadata: - name: syslog-logging-with-non-default-container-maxlength - namespace: openshift-ingress-operator - spec: - logging: - access: - destination: - type: Syslog - container: - maxLength: 2048 - expectedError: "container cannot be specified unless type is Container" - - name: "Should not be able to specify spec.logging.access.destination.syslog and a non-default value for spec.logging.access.destination.container.maxLength" - initial: | - apiVersion: operator.openshift.io/v1 - kind: IngressController - metadata: - name: container-logging-with-non-default-syslog-maxlength - namespace: openshift-ingress-operator - spec: - httpEmptyRequestsPolicy: Respond - logging: - access: - destination: - type: Container - container: - maxLength: 2048 - syslog: - address: 1.2.3.4 - maxLength: 1024 - port: 514 - expectedError: "container and syslog cannot both be specified at the same time" diff --git a/vendor/github.com/openshift/api/operator/v1/stable.network.testsuite.yaml b/vendor/github.com/openshift/api/operator/v1/stable.network.testsuite.yaml index 698e4bf48c..3d65b5dcaf 100644 --- a/vendor/github.com/openshift/api/operator/v1/stable.network.testsuite.yaml +++ b/vendor/github.com/openshift/api/operator/v1/stable.network.testsuite.yaml @@ -1,6 +1,6 @@ apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this name: "[Stable] Network" -crd: 0000_70_cluster-network-operator_01.crd.yaml +crd: 0000_70_cluster-network-operator_01-Default.crd.yaml tests: onCreate: - name: Should be able to create a minimal Network @@ -227,4 +227,40 @@ tests: ipv6: internalMasqueradeSubnet: "abcd:ef01:2345:6789:abcd:ef01:2345::/125" expectedError: "Invalid value: \"string\": a valid IPv6 address must contain 8 segments unless elided (::), in which case it must contain at most 6 non-empty segments" - \ No newline at end of file + - name: "Should not be able to pass an invalid IPV6 CIDR with a segment that contains invalid values" + initial: | + apiVersion: operator.openshift.io/v1 + kind: Network + spec: + defaultNetwork: + ovnKubernetesConfig: + gatewayConfig: + ipv6: + internalMasqueradeSubnet: "xbcd:ef01:2345:6789::2345:6789/20" + expectedError: "Invalid value: \"string\": each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 1" + - name: "Should not be able to pass an invalid IPV6 CIDR with a segment that is 5 characters long" + initial: | + apiVersion: operator.openshift.io/v1 + kind: Network + spec: + defaultNetwork: + ovnKubernetesConfig: + gatewayConfig: + ipv6: + 
internalMasqueradeSubnet: "abcd:eff01:2345:6789::2345:6789/20" + expectedError: "Invalid value: \"string\": each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 2" + - name: Should not be able to create migration mode + initial: | + apiVersion: operator.openshift.io/v1 + kind: Network + spec: + migration: + mode: Live + expected: | + apiVersion: operator.openshift.io/v1 + kind: Network + spec: + disableNetworkDiagnostics: false + logLevel: Normal + operatorLogLevel: Normal + migration: {} diff --git a/vendor/github.com/openshift/api/operator/v1/techpreview.network.testsuite.yaml b/vendor/github.com/openshift/api/operator/v1/techpreview.network.testsuite.yaml new file mode 100644 index 0000000000..6eacb42df4 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/techpreview.network.testsuite.yaml @@ -0,0 +1,100 @@ +apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: "[TechPreviewNoUpgrade] Network" +crd: 0000_70_cluster-network-operator_01-TechPreviewNoUpgrade.crd.yaml +tests: + onCreate: + - name: Should be able to create migration mode + initial: | + apiVersion: operator.openshift.io/v1 + kind: Network + spec: + migration: + mode: Live + expected: | + apiVersion: operator.openshift.io/v1 + kind: Network + spec: + disableNetworkDiagnostics: false + logLevel: Normal + operatorLogLevel: Normal + migration: + mode: Live + - name: Should be able to create mtu migration without setting the migration mode + initial: | + apiVersion: operator.openshift.io/v1 + kind: Network + spec: + migration: + mtu: + network: + from: 1450 + to: 1400 + expected: | + apiVersion: operator.openshift.io/v1 + kind: Network + spec: + disableNetworkDiagnostics: false + logLevel: Normal + operatorLogLevel: Normal + migration: + mtu: + network: + from: 1450 + to: 1400 + - name: Should be able to create networkType migration in in offline migration mode + initial: | + apiVersion: operator.openshift.io/v1 + kind: Network + spec: + migration: + networkType: OVNKubernetes + mode: Offline + expected: | + apiVersion: operator.openshift.io/v1 + kind: Network + spec: + disableNetworkDiagnostics: false + logLevel: Normal + operatorLogLevel: Normal + migration: + networkType: OVNKubernetes + mode: Offline + - name: Should throw an error when mtu and networkType migration is created in offline migration mode + initial: | + apiVersion: operator.openshift.io/v1 + kind: Network + spec: + migration: + networkType: OVNKubernetes + mtu: + network: + from: 1450 + to: 1400 + mode: Offline + expectedError: "networkType migration in mode other than 'Live' may not be configured at the same time as mtu migration" + - name: Should be able to create mtu and networkType migration in live migration mode + initial: | + apiVersion: operator.openshift.io/v1 + kind: Network + spec: + migration: + networkType: OVNKubernetes + mtu: + network: + from: 1450 + to: 1400 + mode: Live + expected: | + apiVersion: operator.openshift.io/v1 + kind: Network + spec: + disableNetworkDiagnostics: false + logLevel: Normal + operatorLogLevel: Normal + migration: + networkType: OVNKubernetes + mtu: + network: + from: 1450 + to: 1400 + mode: Live diff --git a/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go b/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go index d2f0589103..8e9853b06f 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go +++ 
b/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go @@ -109,23 +109,25 @@ type ClusterCSIDriverSpec struct { } // CSIDriverType indicates type of CSI driver being configured. -// +kubebuilder:validation:Enum="";AWS;Azure;GCP;vSphere +// +kubebuilder:validation:Enum="";AWS;Azure;GCP;IBMCloud;vSphere type CSIDriverType string const ( - AWSDriverType CSIDriverType = "AWS" - AzureDriverType CSIDriverType = "Azure" - GCPDriverType CSIDriverType = "GCP" - VSphereDriverType CSIDriverType = "vSphere" + AWSDriverType CSIDriverType = "AWS" + AzureDriverType CSIDriverType = "Azure" + GCPDriverType CSIDriverType = "GCP" + IBMCloudDriverType CSIDriverType = "IBMCloud" + VSphereDriverType CSIDriverType = "vSphere" ) // CSIDriverConfigSpec defines configuration spec that can be // used to optionally configure a specific CSI Driver. +// +kubebuilder:validation:XValidation:rule="has(self.driverType) && self.driverType == 'IBMCloud' ? has(self.ibmcloud) : !has(self.ibmcloud)",message="ibmcloud must be set if driverType is 'IBMCloud', but remain unset otherwise" // +union type CSIDriverConfigSpec struct { // driverType indicates type of CSI driver for which the // driverConfig is being applied to. - // Valid values are: AWS, Azure, GCP, vSphere and omitted. + // Valid values are: AWS, Azure, GCP, IBMCloud, vSphere and omitted. // Consumers should treat unknown values as a NO-OP. // +kubebuilder:validation:Required // +unionDiscriminator @@ -143,6 +145,10 @@ type CSIDriverConfigSpec struct { // +optional GCP *GCPCSIDriverConfigSpec `json:"gcp,omitempty"` + // ibmcloud is used to configure the IBM Cloud CSI driver. + // +optional + IBMCloud *IBMCloudCSIDriverConfigSpec `json:"ibmcloud,omitempty"` + // vsphere is used to configure the vsphere CSI driver. // +optional VSphere *VSphereCSIDriverConfigSpec `json:"vSphere,omitempty"` @@ -248,6 +254,17 @@ type GCPCSIDriverConfigSpec struct { KMSKey *GCPKMSKeyReference `json:"kmsKey,omitempty"` } +// IBMCloudCSIDriverConfigSpec defines the properties that can be configured for the IBM Cloud CSI driver. +type IBMCloudCSIDriverConfigSpec struct { + // encryptionKeyCRN is the IBM Cloud CRN of the customer-managed root key to use + // for disk encryption of volumes for the default storage classes. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MaxLength:=154 + // +kubebuilder:validation:MinLength:=144 + // +kubebuilder:validation:Pattern:=`^crn:v[0-9]+:bluemix:(public|private):(kms|hs-crypto):[a-z-]+:a/[0-9a-f]+:[0-9a-f-]{36}:key:[0-9a-f-]{36}$` + EncryptionKeyCRN string `json:"encryptionKeyCRN"` +} + // VSphereCSIDriverConfigSpec defines properties that // can be configured for vsphere CSI driver. type VSphereCSIDriverConfigSpec struct { diff --git a/vendor/github.com/openshift/api/operator/v1/types_ingress.go b/vendor/github.com/openshift/api/operator/v1/types_ingress.go index c28b73ed91..3d9f512a93 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_ingress.go +++ b/vendor/github.com/openshift/api/operator/v1/types_ingress.go @@ -1054,22 +1054,6 @@ type ContainerLoggingDestinationParameters struct { } // LoggingDestination describes a destination for log messages. -// + --- -// + We added the spec.logging.access.destination.container and -// + spec.logging.access.destination.syslog fields in OpenShift 4.5 without any -// + validation rules to prevent the user from specifying both fields at the -// + same time. Adding such validation rules in a subsequent release would -// + break API compatibility between releases. 
-// + -// + We added the spec.logging.access.destination.container.maxLength field in -// + OpenShift 4.14, and we added the following validation rules in the same -// + release, so they do not break compatibility between releases. Note that -// + API defaulting requires us to allow the default value for -// + container.maxLength if container is non-null even when type is Syslog. -// + -// +kubebuilder:validation:XValidation:rule=`!has(self.type) || self.type == "Container" || !has(self.container) || !has(self.container.maxLength) || self.container.maxLength == 1024`,message="container cannot be specified unless type is Container" -// +kubebuilder:validation:XValidation:rule=`!has(self.container) || !has(self.container.maxLength) || self.container.maxLength == 1024 || !has(self.syslog)`,message="container and syslog cannot both be specified at the same time" -// // +union type LoggingDestination struct { // type is the type of destination for logs. It must be one of the diff --git a/vendor/github.com/openshift/api/operator/v1/types_network.go b/vendor/github.com/openshift/api/operator/v1/types_network.go index 17fbbed0dc..cec3b63181 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_network.go +++ b/vendor/github.com/openshift/api/operator/v1/types_network.go @@ -116,7 +116,20 @@ type NetworkSpec struct { Migration *NetworkMigration `json:"migration,omitempty"` } +// NetworkMigrationMode is an enumeration of the possible mode of the network migration +// Valid values are "Live", "Offline" and omitted. +// +kubebuilder:validation:Enum:=Live;Offline;"" +type NetworkMigrationMode string + +const ( + // A "Live" migration operation will not cause service interruption by migrating the CNI of each node one by one. The cluster network will work as normal during the network migration. + LiveNetworkMigrationMode NetworkMigrationMode = "Live" + // An "Offline" migration operation will cause service interruption. During an "Offline" migration, two rounds of node reboots are required. The cluster network will be malfunctioning during the network migration. + OfflineNetworkMigrationMode NetworkMigrationMode = "Offline" +) + // NetworkMigration represents the cluster network configuration. +// +openshift:validation:FeatureSetAwareXValidation:featureSet=CustomNoUpgrade;TechPreviewNoUpgrade,rule="!has(self.mtu) || !has(self.networkType) || self.networkType == '' || has(self.mode) && self.mode == 'Live'",message="networkType migration in mode other than 'Live' may not be configured at the same time as mtu migration" type NetworkMigration struct { // networkType is the target type of network migration. Set this to the // target network type to allow changing the default network. If unset, the @@ -137,6 +150,16 @@ type NetworkMigration struct { // supported features. // +optional Features *FeaturesMigration `json:"features,omitempty"` + + // mode indicates the mode of network migration. + // The supported values are "Live", "Offline" and omitted. + // A "Live" migration operation will not cause service interruption by migrating the CNI of each node one by one. The cluster network will work as normal during the network migration. + // An "Offline" migration operation will cause service interruption. During an "Offline" migration, two rounds of node reboots are required. The cluster network will be malfunctioning during the network migration. + // When omitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time. 
+ // The current default value is "Offline". + // +openshift:enable:FeatureSets=CustomNoUpgrade;TechPreviewNoUpgrade + // +optional + Mode NetworkMigrationMode `json:"mode"` } type FeaturesMigration struct { @@ -212,10 +235,6 @@ type DefaultNetworkDefinition struct { // ovnKubernetesConfig configures the ovn-kubernetes plugin. // +optional OVNKubernetesConfig *OVNKubernetesConfig `json:"ovnKubernetesConfig,omitempty"` - - // KuryrConfig configures the kuryr plugin - // +optional - KuryrConfig *KuryrConfig `json:"kuryrConfig,omitempty"` } // SimpleMacvlanConfig contains configurations for macvlan interface. @@ -350,74 +369,6 @@ type OpenShiftSDNConfig struct { EnableUnidling *bool `json:"enableUnidling,omitempty"` } -// KuryrConfig configures the Kuryr-Kubernetes SDN -type KuryrConfig struct { - // The port kuryr-daemon will listen for readiness and liveness requests. - // +kubebuilder:validation:Minimum=0 - // +optional - DaemonProbesPort *uint32 `json:"daemonProbesPort,omitempty"` - - // The port kuryr-controller will listen for readiness and liveness requests. - // +kubebuilder:validation:Minimum=0 - // +optional - ControllerProbesPort *uint32 `json:"controllerProbesPort,omitempty"` - - // openStackServiceNetwork contains the CIDR of network from which to allocate IPs for - // OpenStack Octavia's Amphora VMs. Please note that with Amphora driver Octavia uses - // two IPs from that network for each loadbalancer - one given by OpenShift and second - // for VRRP connections. As the first one is managed by OpenShift's and second by Neutron's - // IPAMs, those need to come from different pools. Therefore `openStackServiceNetwork` - // needs to be at least twice the size of `serviceNetwork`, and whole `serviceNetwork` - // must be overlapping with `openStackServiceNetwork`. cluster-network-operator will then - // make sure VRRP IPs are taken from the ranges inside `openStackServiceNetwork` that - // are not overlapping with `serviceNetwork`, effectivly preventing conflicts. If not set - // cluster-network-operator will use `serviceNetwork` expanded by decrementing the prefix - // size by 1. - // +optional - OpenStackServiceNetwork string `json:"openStackServiceNetwork,omitempty"` - - // enablePortPoolsPrepopulation when true will make Kuryr prepopulate each newly created port - // pool with a minimum number of ports. Kuryr uses Neutron port pooling to fight the fact - // that it takes a significant amount of time to create one. It creates a number of ports when - // the first pod that is configured to use the dedicated network for pods is created in a namespace, - // and keeps them ready to be attached to pods. Port prepopulation is disabled by default. - // +optional - EnablePortPoolsPrepopulation bool `json:"enablePortPoolsPrepopulation,omitempty"` - - // poolMaxPorts sets a maximum number of free ports that are being kept in a port pool. - // If the number of ports exceeds this setting, free ports will get deleted. Setting 0 - // will disable this upper bound, effectively preventing pools from shrinking and this - // is the default value. For more information about port pools see - // enablePortPoolsPrepopulation setting. - // +kubebuilder:validation:Minimum=0 - // +optional - PoolMaxPorts uint `json:"poolMaxPorts,omitempty"` - - // poolMinPorts sets a minimum number of free ports that should be kept in a port pool. - // If the number of ports is lower than this setting, new ports will get created and - // added to pool. The default is 1. 
For more information about port pools see - // enablePortPoolsPrepopulation setting. - // +kubebuilder:validation:Minimum=1 - // +optional - PoolMinPorts uint `json:"poolMinPorts,omitempty"` - - // poolBatchPorts sets a number of ports that should be created in a single batch request - // to extend the port pool. The default is 3. For more information about port pools see - // enablePortPoolsPrepopulation setting. - // +kubebuilder:validation:Minimum=0 - // +optional - PoolBatchPorts *uint `json:"poolBatchPorts,omitempty"` - - // mtu is the MTU that Kuryr should use when creating pod networks in Neutron. - // The value has to be lower or equal to the MTU of the nodes network and Neutron has - // to allow creation of tenant networks with such MTU. If unset Pod networks will be - // created with the same MTU as the nodes network has. This also affects the services - // network created by cluster-network-operator. - // +kubebuilder:validation:Minimum=0 - // +optional - MTU *uint32 `json:"mtu,omitempty"` -} - // ovnKubernetesConfig contains the configuration parameters for networks // using the ovn-kubernetes network project type OVNKubernetesConfig struct { @@ -553,14 +504,14 @@ type IPv6GatewayConfig struct { // +kubebuilder:validation:XValidation:rule="self.split('/').size() == 2 && [int(self.split('/')[1])].all(x, x <= 125 && x >= 0)",message="subnet must be in the range /0 to /125 inclusive" // +kubebuilder:validation:XValidation:rule="self.indexOf('::') == self.lastIndexOf('::')",message="IPv6 addresses must contain at most one '::' and may only be shortened once" // +kubebuilder:validation:XValidation:rule="self.contains('::') ? self.split('/')[0].split(':').size() <= 8 : self.split('/')[0].split(':').size() == 8",message="a valid IPv6 address must contain 8 segments unless elided (::), in which case it must contain at most 6 non-empty segments" - // +kubebuilder:validation:XValidation:rule="self.split('/')[0].split(':').size() >=1 ? [self.split('/')[0].split(':', 8)[0]].all(x, x == '' || x.matches('[0-9A-Fa-f]{1,4}')) : true",message="each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 1" - // +kubebuilder:validation:XValidation:rule="self.split('/')[0].split(':').size() >=2 ? [self.split('/')[0].split(':', 8)[1]].all(x, x == '' || x.matches('[0-9A-Fa-f]{1,4}')) : true",message="each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 2" - // +kubebuilder:validation:XValidation:rule="self.split('/')[0].split(':').size() >=3 ? [self.split('/')[0].split(':', 8)[2]].all(x, x == '' || x.matches('[0-9A-Fa-f]{1,4}')) : true",message="each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 3" - // +kubebuilder:validation:XValidation:rule="self.split('/')[0].split(':').size() >=4 ? [self.split('/')[0].split(':', 8)[3]].all(x, x == '' || x.matches('[0-9A-Fa-f]{1,4}')) : true",message="each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 4" - // +kubebuilder:validation:XValidation:rule="self.split('/')[0].split(':').size() >=5 ? [self.split('/')[0].split(':', 8)[4]].all(x, x == '' || x.matches('[0-9A-Fa-f]{1,4}')) : true",message="each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 5" - // +kubebuilder:validation:XValidation:rule="self.split('/')[0].split(':').size() >=6 ? 
[self.split('/')[0].split(':', 8)[5]].all(x, x == '' || x.matches('[0-9A-Fa-f]{1,4}')) : true",message="each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 6" - // +kubebuilder:validation:XValidation:rule="self.split('/')[0].split(':').size() >=7 ? [self.split('/')[0].split(':', 8)[6]].all(x, x == '' || x.matches('[0-9A-Fa-f]{1,4}')) : true",message="each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 7" - // +kubebuilder:validation:XValidation:rule="self.split('/')[0].split(':').size() >=8 ? [self.split('/')[0].split(':', 8)[7]].all(x, x == '' || x.matches('[0-9A-Fa-f]{1,4}')) : true",message="each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 8" + // +kubebuilder:validation:XValidation:rule="self.split('/')[0].split(':').size() >=1 ? [self.split('/')[0].split(':', 8)[0]].all(x, x == '' || (x.matches('^[0-9A-Fa-f]{1,4}$')) && size(x)<5 ) : true",message="each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 1" + // +kubebuilder:validation:XValidation:rule="self.split('/')[0].split(':').size() >=2 ? [self.split('/')[0].split(':', 8)[1]].all(x, x == '' || (x.matches('^[0-9A-Fa-f]{1,4}$')) && size(x)<5 ) : true",message="each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 2" + // +kubebuilder:validation:XValidation:rule="self.split('/')[0].split(':').size() >=3 ? [self.split('/')[0].split(':', 8)[2]].all(x, x == '' || (x.matches('^[0-9A-Fa-f]{1,4}$')) && size(x)<5 ) : true",message="each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 3" + // +kubebuilder:validation:XValidation:rule="self.split('/')[0].split(':').size() >=4 ? [self.split('/')[0].split(':', 8)[3]].all(x, x == '' || (x.matches('^[0-9A-Fa-f]{1,4}$')) && size(x)<5 ) : true",message="each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 4" + // +kubebuilder:validation:XValidation:rule="self.split('/')[0].split(':').size() >=5 ? [self.split('/')[0].split(':', 8)[4]].all(x, x == '' || (x.matches('^[0-9A-Fa-f]{1,4}$')) && size(x)<5 ) : true",message="each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 5" + // +kubebuilder:validation:XValidation:rule="self.split('/')[0].split(':').size() >=6 ? [self.split('/')[0].split(':', 8)[5]].all(x, x == '' || (x.matches('^[0-9A-Fa-f]{1,4}$')) && size(x)<5 ) : true",message="each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 6" + // +kubebuilder:validation:XValidation:rule="self.split('/')[0].split(':').size() >=7 ? [self.split('/')[0].split(':', 8)[6]].all(x, x == '' || (x.matches('^[0-9A-Fa-f]{1,4}$')) && size(x)<5 ) : true",message="each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 7" + // +kubebuilder:validation:XValidation:rule="self.split('/')[0].split(':').size() >=8 ? 
[self.split('/')[0].split(':', 8)[7]].all(x, x == '' || (x.matches('^[0-9A-Fa-f]{1,4}$')) && size(x)<5 ) : true",message="each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 8" // +kubebuilder:validation:XValidation:rule="!self.contains('.')",message="IPv6 dual addresses are not permitted, value should not contain `.` characters" // +optional InternalMasqueradeSubnet string `json:"internalMasqueradeSubnet,omitempty"` @@ -692,9 +643,6 @@ const ( // This is currently not implemented. NetworkTypeOVNKubernetes NetworkType = "OVNKubernetes" - // NetworkTypeKuryr means the kuryr-kubernetes project will be configured. - NetworkTypeKuryr NetworkType = "Kuryr" - // NetworkTypeRaw NetworkTypeRaw NetworkType = "Raw" diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go index f93762e4a0..7823bb40cc 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go @@ -304,6 +304,11 @@ func (in *CSIDriverConfigSpec) DeepCopyInto(out *CSIDriverConfigSpec) { *out = new(GCPCSIDriverConfigSpec) (*in).DeepCopyInto(*out) } + if in.IBMCloud != nil { + in, out := &in.IBMCloud, &out.IBMCloud + *out = new(IBMCloudCSIDriverConfigSpec) + **out = **in + } if in.VSphere != nil { in, out := &in.VSphere, &out.VSphere *out = new(VSphereCSIDriverConfigSpec) @@ -1135,11 +1140,6 @@ func (in *DefaultNetworkDefinition) DeepCopyInto(out *DefaultNetworkDefinition) *out = new(OVNKubernetesConfig) (*in).DeepCopyInto(*out) } - if in.KuryrConfig != nil { - in, out := &in.KuryrConfig, &out.KuryrConfig - *out = new(KuryrConfig) - (*in).DeepCopyInto(*out) - } return } @@ -1692,6 +1692,22 @@ func (in *HybridOverlayConfig) DeepCopy() *HybridOverlayConfig { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IBMCloudCSIDriverConfigSpec) DeepCopyInto(out *IBMCloudCSIDriverConfigSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBMCloudCSIDriverConfigSpec. +func (in *IBMCloudCSIDriverConfigSpec) DeepCopy() *IBMCloudCSIDriverConfigSpec { + if in == nil { + return nil + } + out := new(IBMCloudCSIDriverConfigSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *IBMLoadBalancerParameters) DeepCopyInto(out *IBMLoadBalancerParameters) { *out = *in @@ -2750,42 +2766,6 @@ func (in *KubeStorageVersionMigratorStatus) DeepCopy() *KubeStorageVersionMigrat return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KuryrConfig) DeepCopyInto(out *KuryrConfig) { - *out = *in - if in.DaemonProbesPort != nil { - in, out := &in.DaemonProbesPort, &out.DaemonProbesPort - *out = new(uint32) - **out = **in - } - if in.ControllerProbesPort != nil { - in, out := &in.ControllerProbesPort, &out.ControllerProbesPort - *out = new(uint32) - **out = **in - } - if in.PoolBatchPorts != nil { - in, out := &in.PoolBatchPorts, &out.PoolBatchPorts - *out = new(uint) - **out = **in - } - if in.MTU != nil { - in, out := &in.MTU, &out.MTU - *out = new(uint32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KuryrConfig. 
-func (in *KuryrConfig) DeepCopy() *KuryrConfig { - if in == nil { - return nil - } - out := new(KuryrConfig) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *LoadBalancerStrategy) DeepCopyInto(out *LoadBalancerStrategy) { *out = *in diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go index 96122157d9..d50e77e44d 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go @@ -420,10 +420,11 @@ func (AzureDiskEncryptionSet) SwaggerDoc() map[string]string { var map_CSIDriverConfigSpec = map[string]string{ "": "CSIDriverConfigSpec defines configuration spec that can be used to optionally configure a specific CSI Driver.", - "driverType": "driverType indicates type of CSI driver for which the driverConfig is being applied to. Valid values are: AWS, Azure, GCP, vSphere and omitted. Consumers should treat unknown values as a NO-OP.", + "driverType": "driverType indicates type of CSI driver for which the driverConfig is being applied to. Valid values are: AWS, Azure, GCP, IBMCloud, vSphere and omitted. Consumers should treat unknown values as a NO-OP.", "aws": "aws is used to configure the AWS CSI driver.", "azure": "azure is used to configure the Azure CSI driver.", "gcp": "gcp is used to configure the GCP CSI driver.", + "ibmcloud": "ibmcloud is used to configure the IBM Cloud CSI driver.", "vSphere": "vsphere is used to configure the vsphere CSI driver.", } @@ -490,6 +491,15 @@ func (GCPKMSKeyReference) SwaggerDoc() map[string]string { return map_GCPKMSKeyReference } +var map_IBMCloudCSIDriverConfigSpec = map[string]string{ + "": "IBMCloudCSIDriverConfigSpec defines the properties that can be configured for the IBM Cloud CSI driver.", + "encryptionKeyCRN": "encryptionKeyCRN is the IBM Cloud CRN of the customer-managed root key to use for disk encryption of volumes for the default storage classes.", +} + +func (IBMCloudCSIDriverConfigSpec) SwaggerDoc() map[string]string { + return map_IBMCloudCSIDriverConfigSpec +} + var map_VSphereCSIDriverConfigSpec = map[string]string{ "": "VSphereCSIDriverConfigSpec defines properties that can be configured for vsphere CSI driver.", "topologyCategories": "topologyCategories indicates tag categories with which vcenter resources such as hostcluster or datacenter were tagged with. If cluster Infrastructure object has a topology, values specified in Infrastructure object will be used and modifications to topologyCategories will be rejected.", @@ -1012,7 +1022,7 @@ func (LoadBalancerStrategy) SwaggerDoc() map[string]string { } var map_LoggingDestination = map[string]string{ - "": "LoggingDestination describes a destination for log messages. ", + "": "LoggingDestination describes a destination for log messages.", "type": "type is the type of destination for logs. It must be one of the following:\n\n* Container\n\nThe ingress operator configures the sidecar container named \"logs\" on the ingress controller pod and configures the ingress controller to write logs to the sidecar. The logs are then available as container logs. The expectation is that the administrator configures a custom logging solution that reads logs from this sidecar. 
Note that using container logs means that logs may be dropped if the rate of logs exceeds the container runtime's or the custom logging solution's capacity.\n\n* Syslog\n\nLogs are sent to a syslog endpoint. The administrator must specify an endpoint that can receive syslog messages. The expectation is that the administrator has configured a custom syslog instance.", "syslog": "syslog holds parameters for a syslog endpoint. Present only if type is Syslog.", "container": "container holds parameters for the Container logging destination. Present only if type is Container.", @@ -1290,7 +1300,6 @@ var map_DefaultNetworkDefinition = map[string]string{ "type": "type is the type of network All NetworkTypes are supported except for NetworkTypeRaw", "openshiftSDNConfig": "openShiftSDNConfig configures the openshift-sdn plugin", "ovnKubernetesConfig": "ovnKubernetesConfig configures the ovn-kubernetes plugin.", - "kuryrConfig": "KuryrConfig configures the kuryr plugin", } func (DefaultNetworkDefinition) SwaggerDoc() map[string]string { @@ -1383,22 +1392,6 @@ func (IPv6GatewayConfig) SwaggerDoc() map[string]string { return map_IPv6GatewayConfig } -var map_KuryrConfig = map[string]string{ - "": "KuryrConfig configures the Kuryr-Kubernetes SDN", - "daemonProbesPort": "The port kuryr-daemon will listen for readiness and liveness requests.", - "controllerProbesPort": "The port kuryr-controller will listen for readiness and liveness requests.", - "openStackServiceNetwork": "openStackServiceNetwork contains the CIDR of network from which to allocate IPs for OpenStack Octavia's Amphora VMs. Please note that with Amphora driver Octavia uses two IPs from that network for each loadbalancer - one given by OpenShift and second for VRRP connections. As the first one is managed by OpenShift's and second by Neutron's IPAMs, those need to come from different pools. Therefore `openStackServiceNetwork` needs to be at least twice the size of `serviceNetwork`, and whole `serviceNetwork` must be overlapping with `openStackServiceNetwork`. cluster-network-operator will then make sure VRRP IPs are taken from the ranges inside `openStackServiceNetwork` that are not overlapping with `serviceNetwork`, effectivly preventing conflicts. If not set cluster-network-operator will use `serviceNetwork` expanded by decrementing the prefix size by 1.", - "enablePortPoolsPrepopulation": "enablePortPoolsPrepopulation when true will make Kuryr prepopulate each newly created port pool with a minimum number of ports. Kuryr uses Neutron port pooling to fight the fact that it takes a significant amount of time to create one. It creates a number of ports when the first pod that is configured to use the dedicated network for pods is created in a namespace, and keeps them ready to be attached to pods. Port prepopulation is disabled by default.", - "poolMaxPorts": "poolMaxPorts sets a maximum number of free ports that are being kept in a port pool. If the number of ports exceeds this setting, free ports will get deleted. Setting 0 will disable this upper bound, effectively preventing pools from shrinking and this is the default value. For more information about port pools see enablePortPoolsPrepopulation setting.", - "poolMinPorts": "poolMinPorts sets a minimum number of free ports that should be kept in a port pool. If the number of ports is lower than this setting, new ports will get created and added to pool. The default is 1. 
For more information about port pools see enablePortPoolsPrepopulation setting.", - "poolBatchPorts": "poolBatchPorts sets a number of ports that should be created in a single batch request to extend the port pool. The default is 3. For more information about port pools see enablePortPoolsPrepopulation setting.", - "mtu": "mtu is the MTU that Kuryr should use when creating pod networks in Neutron. The value has to be lower or equal to the MTU of the nodes network and Neutron has to allow creation of tenant networks with such MTU. If unset Pod networks will be created with the same MTU as the nodes network has. This also affects the services network created by cluster-network-operator.", -} - -func (KuryrConfig) SwaggerDoc() map[string]string { - return map_KuryrConfig -} - var map_MTUMigration = map[string]string{ "": "MTUMigration MTU contains infomation about MTU migration.", "network": "network contains information about MTU migration for the default network. Migrations are only allowed to MTU values lower than the machine's uplink MTU by the minimum appropriate offset.", @@ -1450,6 +1443,7 @@ var map_NetworkMigration = map[string]string{ "networkType": "networkType is the target type of network migration. Set this to the target network type to allow changing the default network. If unset, the operation of changing cluster default network plugin will be rejected. The supported values are OpenShiftSDN, OVNKubernetes", "mtu": "mtu contains the MTU migration configuration. Set this to allow changing the MTU values for the default network. If unset, the operation of changing the MTU for the default network will be rejected.", "features": "features contains the features migration configuration. Set this to migrate feature configuration when changing the cluster default network provider. if unset, the default operation is to migrate all the configuration of supported features.", + "mode": "mode indicates the mode of network migration. The supported values are \"Live\", \"Offline\" and omitted. A \"Live\" migration operation will not cause service interruption by migrating the CNI of each node one by one. The cluster network will work as normal during the network migration. An \"Offline\" migration operation will cause service interruption. During an \"Offline\" migration, two rounds of node reboots are required. The cluster network will be malfunctioning during the network migration. When omitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time. The current default value is \"Offline\".", } func (NetworkMigration) SwaggerDoc() map[string]string { diff --git a/vendor/golang.org/x/sys/unix/fcntl.go b/vendor/golang.org/x/sys/unix/fcntl.go index 58c6bfc70f..6200876fb2 100644 --- a/vendor/golang.org/x/sys/unix/fcntl.go +++ b/vendor/golang.org/x/sys/unix/fcntl.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
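The golang.org/x/sys hunks that follow drop openbsd from the generic fcntl build tag and instead wire OpenBSD's fcntl/fcntlPtr through libc trampolines (see the zsyscall_openbsd_*.go and .s additions further down), so the exported FcntlInt and FcntlFlock wrappers gain OpenBSD coverage without changing their signatures. A minimal usage sketch, assuming golang.org/x/sys v0.15.0 as pinned in go.mod; the file path and lock parameters are illustrative only:

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	f, err := os.Create("/tmp/fcntl-demo")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Integer-argument form: read back the file status flags.
	flags, err := unix.FcntlInt(f.Fd(), unix.F_GETFL, 0)
	fmt.Println("F_GETFL:", flags, err)

	// Struct form: try to take a non-blocking write lock on the whole file.
	lk := unix.Flock_t{
		Type:   unix.F_WRLCK,
		Whence: 0, // SEEK_SET: offsets are relative to the start of the file
	}
	fmt.Println("F_SETLK:", unix.FcntlFlock(f.Fd(), unix.F_SETLK, &lk))
}
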
-//go:build dragonfly || freebsd || linux || netbsd || openbsd +//go:build dragonfly || freebsd || linux || netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ioctl_linux.go b/vendor/golang.org/x/sys/unix/ioctl_linux.go index 0d12c0851a..dbe680eab8 100644 --- a/vendor/golang.org/x/sys/unix/ioctl_linux.go +++ b/vendor/golang.org/x/sys/unix/ioctl_linux.go @@ -231,3 +231,8 @@ func IoctlLoopGetStatus64(fd int) (*LoopInfo64, error) { func IoctlLoopSetStatus64(fd int, value *LoopInfo64) error { return ioctlPtr(fd, LOOP_SET_STATUS64, unsafe.Pointer(value)) } + +// IoctlLoopConfigure configures all loop device parameters in a single step +func IoctlLoopConfigure(fd int, value *LoopConfig) error { + return ioctlPtr(fd, LOOP_CONFIGURE, unsafe.Pointer(value)) +} diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index cbe24150a7..6202638bae 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -519,6 +519,7 @@ ccflags="$@" $2 ~ /^LOCK_(SH|EX|NB|UN)$/ || $2 ~ /^LO_(KEY|NAME)_SIZE$/ || $2 ~ /^LOOP_(CLR|CTL|GET|SET)_/ || + $2 == "LOOP_CONFIGURE" || $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|TCP|MCAST|EVFILT|NOTE|SHUT|PROT|MAP|MREMAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR|LOCAL|TCPOPT|UDP)_/ || $2 ~ /^NFC_(GENL|PROTO|COMM|RF|SE|DIRECTION|LLCP|SOCKPROTO)_/ || $2 ~ /^NFC_.*_(MAX)?SIZE$/ || @@ -560,7 +561,7 @@ ccflags="$@" $2 ~ /^RLIMIT_(AS|CORE|CPU|DATA|FSIZE|LOCKS|MEMLOCK|MSGQUEUE|NICE|NOFILE|NPROC|RSS|RTPRIO|RTTIME|SIGPENDING|STACK)|RLIM_INFINITY/ || $2 ~ /^PRIO_(PROCESS|PGRP|USER)/ || $2 ~ /^CLONE_[A-Z_]+/ || - $2 !~ /^(BPF_TIMEVAL|BPF_FIB_LOOKUP_[A-Z]+)$/ && + $2 !~ /^(BPF_TIMEVAL|BPF_FIB_LOOKUP_[A-Z]+|BPF_F_LINK)$/ && $2 ~ /^(BPF|DLT)_/ || $2 ~ /^AUDIT_/ || $2 ~ /^(CLOCK|TIMER)_/ || diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd.go b/vendor/golang.org/x/sys/unix/syscall_bsd.go index 6f328e3a55..a00c3e5450 100644 --- a/vendor/golang.org/x/sys/unix/syscall_bsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_bsd.go @@ -316,7 +316,7 @@ func GetsockoptString(fd, level, opt int) (string, error) { if err != nil { return "", err } - return string(buf[:vallen-1]), nil + return ByteSliceToString(buf[:vallen]), nil } //sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index a5e1c10e34..0f85e29e62 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -61,15 +61,23 @@ func FanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname string) ( } //sys fchmodat(dirfd int, path string, mode uint32) (err error) - -func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { - // Linux fchmodat doesn't support the flags parameter. Mimick glibc's behavior - // and check the flags. Otherwise the mode would be applied to the symlink - // destination which is not what the user expects. - if flags&^AT_SYMLINK_NOFOLLOW != 0 { - return EINVAL - } else if flags&AT_SYMLINK_NOFOLLOW != 0 { - return EOPNOTSUPP +//sys fchmodat2(dirfd int, path string, mode uint32, flags int) (err error) + +func Fchmodat(dirfd int, path string, mode uint32, flags int) error { + // Linux fchmodat doesn't support the flags parameter, but fchmodat2 does. + // Try fchmodat2 if flags are specified. + if flags != 0 { + err := fchmodat2(dirfd, path, mode, flags) + if err == ENOSYS { + // fchmodat2 isn't available. 
If the flags are known to be valid, + // return EOPNOTSUPP to indicate that fchmodat doesn't support them. + if flags&^(AT_SYMLINK_NOFOLLOW|AT_EMPTY_PATH) != 0 { + return EINVAL + } else if flags&(AT_SYMLINK_NOFOLLOW|AT_EMPTY_PATH) != 0 { + return EOPNOTSUPP + } + } + return err } return fchmodat(dirfd, path, mode) } @@ -1302,7 +1310,7 @@ func GetsockoptString(fd, level, opt int) (string, error) { return "", err } } - return string(buf[:vallen-1]), nil + return ByteSliceToString(buf[:vallen]), nil } func GetsockoptTpacketStats(fd, level, opt int) (*TpacketStats, error) { diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go index d2882ee04f..b25343c71a 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go @@ -166,6 +166,20 @@ func Getresgid() (rgid, egid, sgid int) { //sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL +//sys fcntl(fd int, cmd int, arg int) (n int, err error) +//sys fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) = SYS_FCNTL + +// FcntlInt performs a fcntl syscall on fd with the provided command and argument. +func FcntlInt(fd uintptr, cmd, arg int) (int, error) { + return fcntl(int(fd), cmd, arg) +} + +// FcntlFlock performs a fcntl syscall for the F_GETLK, F_SETLK or F_SETLKW command. +func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error { + _, err := fcntlPtr(int(fd), cmd, unsafe.Pointer(lk)) + return err +} + //sys ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) func Ppoll(fds []PollFd, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index 60c8142d49..21974af064 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -158,7 +158,7 @@ func GetsockoptString(fd, level, opt int) (string, error) { if err != nil { return "", err } - return string(buf[:vallen-1]), nil + return ByteSliceToString(buf[:vallen]), nil } const ImplementsGetwd = true diff --git a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go index d99d05f1bc..b473038c61 100644 --- a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go @@ -1104,7 +1104,7 @@ func GetsockoptString(fd, level, opt int) (string, error) { return "", err } - return string(buf[:vallen-1]), nil + return ByteSliceToString(buf[:vallen]), nil } func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) { diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 9c00cbf512..c73cfe2f10 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -486,7 +486,6 @@ const ( BPF_F_ANY_ALIGNMENT = 0x2 BPF_F_BEFORE = 0x8 BPF_F_ID = 0x20 - BPF_F_LINK = 0x2000 BPF_F_NETFILTER_IP_DEFRAG = 0x1 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_REPLACE = 0x4 @@ -1802,6 +1801,7 @@ const ( LOCK_SH = 0x1 LOCK_UN = 0x8 LOOP_CLR_FD = 0x4c01 + LOOP_CONFIGURE = 0x4c0a LOOP_CTL_ADD = 0x4c80 LOOP_CTL_GET_FREE = 0x4c82 LOOP_CTL_REMOVE = 0x4c81 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index faca7a557b..1488d27128 100644 --- 
a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -37,6 +37,21 @@ func fchmodat(dirfd int, path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fchmodat2(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHMODAT2, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctl(fd int, req uint, arg uintptr) (err error) { _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index 88bfc28857..a1d061597c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -584,6 +584,32 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fcntl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s index 4cbeff171b..41b5617316 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s @@ -178,6 +178,11 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $4 DATA ·libc_sysctl_trampoline_addr(SB)/4, $libc_sysctl_trampoline<>(SB) +TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fcntl(SB) +GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fcntl_trampoline_addr(SB)/4, $libc_fcntl_trampoline<>(SB) + TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ppoll(SB) GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $4 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index b8a67b99af..5b2a740977 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -584,6 +584,32 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = 
errnoErr(e1) + } + return +} + +var libc_fcntl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s index 1123f27571..4019a656f6 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s @@ -178,6 +178,11 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) +TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fcntl(SB) +GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fcntl_trampoline_addr(SB)/8, $libc_fcntl_trampoline<>(SB) + TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ppoll(SB) GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index af50a65c0c..f6eda1344a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -584,6 +584,32 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fcntl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s index 82badae39f..ac4af24f90 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s @@ -178,6 +178,11 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $4 DATA ·libc_sysctl_trampoline_addr(SB)/4, $libc_sysctl_trampoline<>(SB) +TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fcntl(SB) +GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $4 +DATA 
·libc_fcntl_trampoline_addr(SB)/4, $libc_fcntl_trampoline<>(SB) + TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ppoll(SB) GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $4 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go index 8fb4ff36a7..55df20ae9d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go @@ -584,6 +584,32 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fcntl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s index 24d7eecb93..f77d532121 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s @@ -178,6 +178,11 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) +TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fcntl(SB) +GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fcntl_trampoline_addr(SB)/8, $libc_fcntl_trampoline<>(SB) + TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ppoll(SB) GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go index f469a83ee6..8c1155cbc0 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go @@ -584,6 +584,32 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fcntl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { 
r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s index 9a498a0677..fae140b62c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s @@ -178,6 +178,11 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) +TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fcntl(SB) +GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fcntl_trampoline_addr(SB)/8, $libc_fcntl_trampoline<>(SB) + TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ppoll(SB) GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go index c26ca2e1aa..7cc80c58d9 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go @@ -584,6 +584,32 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fcntl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s index 1f224aa416..9d1e0ff06d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s @@ -213,6 +213,12 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) +TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_fcntl(SB) + RET +GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fcntl_trampoline_addr(SB)/8, $libc_fcntl_trampoline<>(SB) + TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 CALL libc_ppoll(SB) RET diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go index bcc920dd25..0688737f49 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go @@ -584,6 +584,32 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (n 
int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fcntl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s index 87a79c7095..da115f9a4b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s @@ -178,6 +178,11 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) +TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fcntl(SB) +GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fcntl_trampoline_addr(SB)/8, $libc_fcntl_trampoline<>(SB) + TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ppoll(SB) GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 997bcd55ae..bbf8399ff5 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -2671,6 +2671,7 @@ const ( BPF_PROG_TYPE_LSM = 0x1d BPF_PROG_TYPE_SK_LOOKUP = 0x1e BPF_PROG_TYPE_SYSCALL = 0x1f + BPF_PROG_TYPE_NETFILTER = 0x20 BPF_CGROUP_INET_INGRESS = 0x0 BPF_CGROUP_INET_EGRESS = 0x1 BPF_CGROUP_INET_SOCK_CREATE = 0x2 @@ -2715,6 +2716,11 @@ const ( BPF_PERF_EVENT = 0x29 BPF_TRACE_KPROBE_MULTI = 0x2a BPF_LSM_CGROUP = 0x2b + BPF_STRUCT_OPS = 0x2c + BPF_NETFILTER = 0x2d + BPF_TCX_INGRESS = 0x2e + BPF_TCX_EGRESS = 0x2f + BPF_TRACE_UPROBE_MULTI = 0x30 BPF_LINK_TYPE_UNSPEC = 0x0 BPF_LINK_TYPE_RAW_TRACEPOINT = 0x1 BPF_LINK_TYPE_TRACING = 0x2 @@ -2725,6 +2731,18 @@ const ( BPF_LINK_TYPE_PERF_EVENT = 0x7 BPF_LINK_TYPE_KPROBE_MULTI = 0x8 BPF_LINK_TYPE_STRUCT_OPS = 0x9 + BPF_LINK_TYPE_NETFILTER = 0xa + BPF_LINK_TYPE_TCX = 0xb + BPF_LINK_TYPE_UPROBE_MULTI = 0xc + BPF_PERF_EVENT_UNSPEC = 0x0 + BPF_PERF_EVENT_UPROBE = 0x1 + BPF_PERF_EVENT_URETPROBE = 0x2 + BPF_PERF_EVENT_KPROBE = 0x3 + BPF_PERF_EVENT_KRETPROBE = 0x4 + BPF_PERF_EVENT_TRACEPOINT = 0x5 + BPF_PERF_EVENT_EVENT = 0x6 + BPF_F_KPROBE_MULTI_RETURN = 0x1 + BPF_F_UPROBE_MULTI_RETURN = 0x1 BPF_ANY = 0x0 BPF_NOEXIST = 0x1 BPF_EXIST = 0x2 @@ -2742,6 +2760,8 @@ const ( BPF_F_MMAPABLE = 0x400 BPF_F_PRESERVE_ELEMS = 0x800 BPF_F_INNER_MAP = 0x1000 + BPF_F_LINK = 0x2000 + BPF_F_PATH_FD = 0x4000 BPF_STATS_RUN_TIME = 0x0 BPF_STACK_BUILD_ID_EMPTY = 0x0 BPF_STACK_BUILD_ID_VALID = 0x1 @@ -2762,6 +2782,7 @@ const ( BPF_F_ZERO_CSUM_TX = 0x2 BPF_F_DONT_FRAGMENT = 0x4 BPF_F_SEQ_NUMBER = 0x8 + BPF_F_NO_TUNNEL_KEY = 0x10 BPF_F_TUNINFO_FLAGS = 0x10 BPF_F_INDEX_MASK = 0xffffffff BPF_F_CURRENT_CPU = 0xffffffff @@ 
-2778,6 +2799,8 @@ const ( BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 BPF_F_ADJ_ROOM_NO_CSUM_RESET = 0x20 BPF_F_ADJ_ROOM_ENCAP_L2_ETH = 0x40 + BPF_F_ADJ_ROOM_DECAP_L3_IPV4 = 0x80 + BPF_F_ADJ_ROOM_DECAP_L3_IPV6 = 0x100 BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_F_SYSCTL_BASE_NAME = 0x1 @@ -2866,6 +2889,8 @@ const ( BPF_DEVCG_DEV_CHAR = 0x2 BPF_FIB_LOOKUP_DIRECT = 0x1 BPF_FIB_LOOKUP_OUTPUT = 0x2 + BPF_FIB_LOOKUP_SKIP_NEIGH = 0x4 + BPF_FIB_LOOKUP_TBID = 0x8 BPF_FIB_LKUP_RET_SUCCESS = 0x0 BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 @@ -2901,6 +2926,7 @@ const ( BPF_CORE_ENUMVAL_EXISTS = 0xa BPF_CORE_ENUMVAL_VALUE = 0xb BPF_CORE_TYPE_MATCHES = 0xc + BPF_F_TIMER_ABS = 0x1 ) const ( @@ -2979,6 +3005,12 @@ type LoopInfo64 struct { Encrypt_key [32]uint8 Init [2]uint64 } +type LoopConfig struct { + Fd uint32 + Size uint32 + Info LoopInfo64 + _ [8]uint64 +} type TIPCSocketAddr struct { Ref uint32 diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index fb6cfd0462..47dc579676 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -155,6 +155,8 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys GetModuleFileName(module Handle, filename *uint16, size uint32) (n uint32, err error) = kernel32.GetModuleFileNameW //sys GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err error) = kernel32.GetModuleHandleExW //sys SetDefaultDllDirectories(directoryFlags uint32) (err error) +//sys AddDllDirectory(path *uint16) (cookie uintptr, err error) = kernel32.AddDllDirectory +//sys RemoveDllDirectory(cookie uintptr) (err error) = kernel32.RemoveDllDirectory //sys SetDllDirectory(path string) (err error) = kernel32.SetDllDirectoryW //sys GetVersion() (ver uint32, err error) //sys FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, buf []uint16, args *byte) (n uint32, err error) = FormatMessageW diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index db6282e00a..146a1f0196 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -184,6 +184,7 @@ var ( procGetAdaptersInfo = modiphlpapi.NewProc("GetAdaptersInfo") procGetBestInterfaceEx = modiphlpapi.NewProc("GetBestInterfaceEx") procGetIfEntry = modiphlpapi.NewProc("GetIfEntry") + procAddDllDirectory = modkernel32.NewProc("AddDllDirectory") procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject") procCancelIo = modkernel32.NewProc("CancelIo") procCancelIoEx = modkernel32.NewProc("CancelIoEx") @@ -330,6 +331,7 @@ var ( procReadProcessMemory = modkernel32.NewProc("ReadProcessMemory") procReleaseMutex = modkernel32.NewProc("ReleaseMutex") procRemoveDirectoryW = modkernel32.NewProc("RemoveDirectoryW") + procRemoveDllDirectory = modkernel32.NewProc("RemoveDllDirectory") procResetEvent = modkernel32.NewProc("ResetEvent") procResizePseudoConsole = modkernel32.NewProc("ResizePseudoConsole") procResumeThread = modkernel32.NewProc("ResumeThread") @@ -1605,6 +1607,15 @@ func GetIfEntry(pIfRow *MibIfRow) (errcode error) { return } +func AddDllDirectory(path *uint16) (cookie uintptr, err error) { + r0, _, e1 := syscall.Syscall(procAddDllDirectory.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + cookie = uintptr(r0) + if cookie == 0 { + err = errnoErr(e1) + } + return +} + func AssignProcessToJobObject(job 
Handle, process Handle) (err error) { r1, _, e1 := syscall.Syscall(procAssignProcessToJobObject.Addr(), 2, uintptr(job), uintptr(process), 0) if r1 == 0 { @@ -2879,6 +2890,14 @@ func RemoveDirectory(path *uint16) (err error) { return } +func RemoveDllDirectory(cookie uintptr) (err error) { + r1, _, e1 := syscall.Syscall(procRemoveDllDirectory.Addr(), 1, uintptr(cookie), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func ResetEvent(event Handle) (err error) { r1, _, e1 := syscall.Syscall(procResetEvent.Addr(), 1, uintptr(event), 0, 0) if r1 == 0 { diff --git a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go index d738725caf..3674914f70 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go +++ b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go @@ -126,14 +126,17 @@ type rudimentaryErrorBackoff struct { // OnError will block if it is called more often than the embedded period time. // This will prevent overly tight hot error loops. func (r *rudimentaryErrorBackoff) OnError(error) { + now := time.Now() // start the timer before acquiring the lock r.lastErrorTimeLock.Lock() - defer r.lastErrorTimeLock.Unlock() - d := time.Since(r.lastErrorTime) - if d < r.minPeriod { - // If the time moves backwards for any reason, do nothing - time.Sleep(r.minPeriod - d) - } + d := now.Sub(r.lastErrorTime) r.lastErrorTime = time.Now() + r.lastErrorTimeLock.Unlock() + + // Do not sleep with the lock held because that causes all callers of HandleError to block. + // We only want the current goroutine to block. + // A negative or zero duration causes time.Sleep to return immediately. + // If the time moves backwards for any reason, do nothing. + time.Sleep(r.minPeriod - d) } // GetCaller returns the caller of the function that calls it. diff --git a/vendor/k8s.io/klog/v2/.golangci.yaml b/vendor/k8s.io/klog/v2/.golangci.yaml new file mode 100644 index 0000000000..0d77d65f06 --- /dev/null +++ b/vendor/k8s.io/klog/v2/.golangci.yaml @@ -0,0 +1,6 @@ +linters: + disable-all: true + enable: # sorted alphabetical + - gofmt + - misspell + - revive diff --git a/vendor/k8s.io/klog/v2/internal/buffer/buffer.go b/vendor/k8s.io/klog/v2/internal/buffer/buffer.go index f325ded5e9..46de00fb06 100644 --- a/vendor/k8s.io/klog/v2/internal/buffer/buffer.go +++ b/vendor/k8s.io/klog/v2/internal/buffer/buffer.go @@ -30,14 +30,16 @@ import ( var ( // Pid is inserted into log headers. Can be overridden for tests. Pid = os.Getpid() + + // Time, if set, will be used instead of the actual current time. + Time *time.Time ) // Buffer holds a single byte.Buffer for reuse. The zero value is ready for // use. It also provides some helper methods for output formatting. type Buffer struct { bytes.Buffer - Tmp [64]byte // temporary byte array for creating headers. - next *Buffer + Tmp [64]byte // temporary byte array for creating headers. } var buffers = sync.Pool{ @@ -122,6 +124,9 @@ func (buf *Buffer) FormatHeader(s severity.Severity, file string, line int, now // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand. // It's worth about 3X. Fprintf is hard. + if Time != nil { + now = *Time + } _, month, day := now.Date() hour, minute, second := now.Clock() // Lmmdd hh:mm:ss.uuuuuu threadid file:line] @@ -157,6 +162,9 @@ func (buf *Buffer) SprintHeader(s severity.Severity, now time.Time) string { // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand. // It's worth about 3X. 
Fprintf is hard. + if Time != nil { + now = *Time + } _, month, day := now.Date() hour, minute, second := now.Clock() // Lmmdd hh:mm:ss.uuuuuu threadid file:line] diff --git a/vendor/k8s.io/klog/v2/internal/clock/clock.go b/vendor/k8s.io/klog/v2/internal/clock/clock.go index b8b6af5c81..cc11bb4802 100644 --- a/vendor/k8s.io/klog/v2/internal/clock/clock.go +++ b/vendor/k8s.io/klog/v2/internal/clock/clock.go @@ -39,16 +39,6 @@ type Clock interface { // Sleep sleeps for the provided duration d. // Consider making the sleep interruptible by using 'select' on a context channel and a timer channel. Sleep(d time.Duration) - // Tick returns the channel of a new Ticker. - // This method does not allow to free/GC the backing ticker. Use - // NewTicker from WithTicker instead. - Tick(d time.Duration) <-chan time.Time -} - -// WithTicker allows for injecting fake or real clocks into code that -// needs to do arbitrary things based on time. -type WithTicker interface { - Clock // NewTicker returns a new Ticker. NewTicker(time.Duration) Ticker } @@ -66,7 +56,7 @@ type WithDelayedExecution interface { // WithTickerAndDelayedExecution allows for injecting fake or real clocks // into code that needs Ticker and AfterFunc functionality type WithTickerAndDelayedExecution interface { - WithTicker + Clock // AfterFunc executes f in its own goroutine after waiting // for d duration and returns a Timer whose channel can be // closed by calling Stop() on the Timer. @@ -79,7 +69,7 @@ type Ticker interface { Stop() } -var _ = WithTicker(RealClock{}) +var _ Clock = RealClock{} // RealClock really calls time.Now() type RealClock struct{} @@ -115,13 +105,6 @@ func (RealClock) AfterFunc(d time.Duration, f func()) Timer { } } -// Tick is the same as time.Tick(d) -// This method does not allow to free/GC the backing ticker. Use -// NewTicker instead. -func (RealClock) Tick(d time.Duration) <-chan time.Time { - return time.Tick(d) -} - // NewTicker returns a new Ticker. func (RealClock) NewTicker(d time.Duration) Ticker { return &realTicker{ diff --git a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go index bcdf5f8ee1..d1a4751c94 100644 --- a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go +++ b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go @@ -172,73 +172,6 @@ func KVListFormat(b *bytes.Buffer, keysAndValues ...interface{}) { Formatter{}.KVListFormat(b, keysAndValues...) } -// KVFormat serializes one key/value pair into the provided buffer. -// A space gets inserted before the pair. -func (f Formatter) KVFormat(b *bytes.Buffer, k, v interface{}) { - b.WriteByte(' ') - // Keys are assumed to be well-formed according to - // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments - // for the sake of performance. Keys with spaces, - // special characters, etc. will break parsing. - if sK, ok := k.(string); ok { - // Avoid one allocation when the key is a string, which - // normally it should be. - b.WriteString(sK) - } else { - b.WriteString(fmt.Sprintf("%s", k)) - } - - // The type checks are sorted so that more frequently used ones - // come first because that is then faster in the common - // cases. In Kubernetes, ObjectRef (a Stringer) is more common - // than plain strings - // (https://github.com/kubernetes/kubernetes/pull/106594#issuecomment-975526235). 
- switch v := v.(type) { - case textWriter: - writeTextWriterValue(b, v) - case fmt.Stringer: - writeStringValue(b, StringerToString(v)) - case string: - writeStringValue(b, v) - case error: - writeStringValue(b, ErrorToString(v)) - case logr.Marshaler: - value := MarshalerToValue(v) - // A marshaler that returns a string is useful for - // delayed formatting of complex values. We treat this - // case like a normal string. This is useful for - // multi-line support. - // - // We could do this by recursively formatting a value, - // but that comes with the risk of infinite recursion - // if a marshaler returns itself. Instead we call it - // only once and rely on it returning the intended - // value directly. - switch value := value.(type) { - case string: - writeStringValue(b, value) - default: - f.formatAny(b, value) - } - case []byte: - // In https://github.com/kubernetes/klog/pull/237 it was decided - // to format byte slices with "%+q". The advantages of that are: - // - readable output if the bytes happen to be printable - // - non-printable bytes get represented as unicode escape - // sequences (\uxxxx) - // - // The downsides are that we cannot use the faster - // strconv.Quote here and that multi-line output is not - // supported. If developers know that a byte array is - // printable and they want multi-line output, they can - // convert the value to string before logging it. - b.WriteByte('=') - b.WriteString(fmt.Sprintf("%+q", v)) - default: - f.formatAny(b, v) - } -} - func KVFormat(b *bytes.Buffer, k, v interface{}) { Formatter{}.KVFormat(b, k, v) } @@ -251,6 +184,10 @@ func (f Formatter) formatAny(b *bytes.Buffer, v interface{}) { b.WriteString(f.AnyToStringHook(v)) return } + formatAsJSON(b, v) +} + +func formatAsJSON(b *bytes.Buffer, v interface{}) { encoder := json.NewEncoder(b) l := b.Len() if err := encoder.Encode(v); err != nil { diff --git a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues_no_slog.go b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues_no_slog.go new file mode 100644 index 0000000000..d9c7d15467 --- /dev/null +++ b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues_no_slog.go @@ -0,0 +1,97 @@ +//go:build !go1.21 +// +build !go1.21 + +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package serialize + +import ( + "bytes" + "fmt" + + "github.com/go-logr/logr" +) + +// KVFormat serializes one key/value pair into the provided buffer. +// A space gets inserted before the pair. +func (f Formatter) KVFormat(b *bytes.Buffer, k, v interface{}) { + // This is the version without slog support. Must be kept in sync with + // the version in keyvalues_slog.go. + + b.WriteByte(' ') + // Keys are assumed to be well-formed according to + // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments + // for the sake of performance. Keys with spaces, + // special characters, etc. will break parsing. 
+ if sK, ok := k.(string); ok { + // Avoid one allocation when the key is a string, which + // normally it should be. + b.WriteString(sK) + } else { + b.WriteString(fmt.Sprintf("%s", k)) + } + + // The type checks are sorted so that more frequently used ones + // come first because that is then faster in the common + // cases. In Kubernetes, ObjectRef (a Stringer) is more common + // than plain strings + // (https://github.com/kubernetes/kubernetes/pull/106594#issuecomment-975526235). + switch v := v.(type) { + case textWriter: + writeTextWriterValue(b, v) + case fmt.Stringer: + writeStringValue(b, StringerToString(v)) + case string: + writeStringValue(b, v) + case error: + writeStringValue(b, ErrorToString(v)) + case logr.Marshaler: + value := MarshalerToValue(v) + // A marshaler that returns a string is useful for + // delayed formatting of complex values. We treat this + // case like a normal string. This is useful for + // multi-line support. + // + // We could do this by recursively formatting a value, + // but that comes with the risk of infinite recursion + // if a marshaler returns itself. Instead we call it + // only once and rely on it returning the intended + // value directly. + switch value := value.(type) { + case string: + writeStringValue(b, value) + default: + f.formatAny(b, value) + } + case []byte: + // In https://github.com/kubernetes/klog/pull/237 it was decided + // to format byte slices with "%+q". The advantages of that are: + // - readable output if the bytes happen to be printable + // - non-printable bytes get represented as unicode escape + // sequences (\uxxxx) + // + // The downsides are that we cannot use the faster + // strconv.Quote here and that multi-line output is not + // supported. If developers know that a byte array is + // printable and they want multi-line output, they can + // convert the value to string before logging it. + b.WriteByte('=') + b.WriteString(fmt.Sprintf("%+q", v)) + default: + f.formatAny(b, v) + } +} diff --git a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues_slog.go b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues_slog.go new file mode 100644 index 0000000000..89acf97723 --- /dev/null +++ b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues_slog.go @@ -0,0 +1,155 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package serialize + +import ( + "bytes" + "fmt" + "log/slog" + "strconv" + + "github.com/go-logr/logr" +) + +// KVFormat serializes one key/value pair into the provided buffer. +// A space gets inserted before the pair. +func (f Formatter) KVFormat(b *bytes.Buffer, k, v interface{}) { + // This is the version without slog support. Must be kept in sync with + // the version in keyvalues_slog.go. + + b.WriteByte(' ') + // Keys are assumed to be well-formed according to + // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments + // for the sake of performance. 
Keys with spaces, + // special characters, etc. will break parsing. + if sK, ok := k.(string); ok { + // Avoid one allocation when the key is a string, which + // normally it should be. + b.WriteString(sK) + } else { + b.WriteString(fmt.Sprintf("%s", k)) + } + + // The type checks are sorted so that more frequently used ones + // come first because that is then faster in the common + // cases. In Kubernetes, ObjectRef (a Stringer) is more common + // than plain strings + // (https://github.com/kubernetes/kubernetes/pull/106594#issuecomment-975526235). + // + // slog.LogValuer does not need to be handled here because the handler will + // already have resolved such special values to the final value for logging. + switch v := v.(type) { + case textWriter: + writeTextWriterValue(b, v) + case slog.Value: + // This must come before fmt.Stringer because slog.Value implements + // fmt.Stringer, but does not produce the output that we want. + b.WriteByte('=') + generateJSON(b, v) + case fmt.Stringer: + writeStringValue(b, StringerToString(v)) + case string: + writeStringValue(b, v) + case error: + writeStringValue(b, ErrorToString(v)) + case logr.Marshaler: + value := MarshalerToValue(v) + // A marshaler that returns a string is useful for + // delayed formatting of complex values. We treat this + // case like a normal string. This is useful for + // multi-line support. + // + // We could do this by recursively formatting a value, + // but that comes with the risk of infinite recursion + // if a marshaler returns itself. Instead we call it + // only once and rely on it returning the intended + // value directly. + switch value := value.(type) { + case string: + writeStringValue(b, value) + default: + f.formatAny(b, value) + } + case slog.LogValuer: + value := slog.AnyValue(v).Resolve() + if value.Kind() == slog.KindString { + writeStringValue(b, value.String()) + } else { + b.WriteByte('=') + generateJSON(b, value) + } + case []byte: + // In https://github.com/kubernetes/klog/pull/237 it was decided + // to format byte slices with "%+q". The advantages of that are: + // - readable output if the bytes happen to be printable + // - non-printable bytes get represented as unicode escape + // sequences (\uxxxx) + // + // The downsides are that we cannot use the faster + // strconv.Quote here and that multi-line output is not + // supported. If developers know that a byte array is + // printable and they want multi-line output, they can + // convert the value to string before logging it. + b.WriteByte('=') + b.WriteString(fmt.Sprintf("%+q", v)) + default: + f.formatAny(b, v) + } +} + +// generateJSON has the same preference for plain strings as KVFormat. +// In contrast to KVFormat it always produces valid JSON with no line breaks. +func generateJSON(b *bytes.Buffer, v interface{}) { + switch v := v.(type) { + case slog.Value: + switch v.Kind() { + case slog.KindGroup: + // Format as a JSON group. We must not involve f.AnyToStringHook (if there is any), + // because there is no guarantee that it produces valid JSON. + b.WriteByte('{') + for i, attr := range v.Group() { + if i > 0 { + b.WriteByte(',') + } + b.WriteString(strconv.Quote(attr.Key)) + b.WriteByte(':') + generateJSON(b, attr.Value) + } + b.WriteByte('}') + case slog.KindLogValuer: + generateJSON(b, v.Resolve()) + default: + // Peel off the slog.Value wrapper and format the actual value. 
+ generateJSON(b, v.Any()) + } + case fmt.Stringer: + b.WriteString(strconv.Quote(StringerToString(v))) + case logr.Marshaler: + generateJSON(b, MarshalerToValue(v)) + case slog.LogValuer: + generateJSON(b, slog.AnyValue(v).Resolve().Any()) + case string: + b.WriteString(strconv.Quote(v)) + case error: + b.WriteString(strconv.Quote(v.Error())) + default: + formatAsJSON(b, v) + } +} diff --git a/vendor/k8s.io/klog/v2/internal/sloghandler/sloghandler_slog.go b/vendor/k8s.io/klog/v2/internal/sloghandler/sloghandler_slog.go new file mode 100644 index 0000000000..21f1697d09 --- /dev/null +++ b/vendor/k8s.io/klog/v2/internal/sloghandler/sloghandler_slog.go @@ -0,0 +1,96 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sloghandler + +import ( + "context" + "log/slog" + "runtime" + "strings" + "time" + + "k8s.io/klog/v2/internal/severity" +) + +func Handle(_ context.Context, record slog.Record, groups string, printWithInfos func(file string, line int, now time.Time, err error, s severity.Severity, msg string, kvList []interface{})) error { + now := record.Time + if now.IsZero() { + // This format doesn't support printing entries without a time. + now = time.Now() + } + + // slog has numeric severity levels, with 0 as default "info", negative for debugging, and + // positive with some pre-defined levels for more important. Those ranges get mapped to + // the corresponding klog levels where possible, with "info" the default that is used + // also for negative debug levels. + level := record.Level + s := severity.InfoLog + switch { + case level >= slog.LevelError: + s = severity.ErrorLog + case level >= slog.LevelWarn: + s = severity.WarningLog + } + + var file string + var line int + if record.PC != 0 { + // Same as https://cs.opensource.google/go/x/exp/+/642cacee:slog/record.go;drc=642cacee5cc05231f45555a333d07f1005ffc287;l=70 + fs := runtime.CallersFrames([]uintptr{record.PC}) + f, _ := fs.Next() + if f.File != "" { + file = f.File + if slash := strings.LastIndex(file, "/"); slash >= 0 { + file = file[slash+1:] + } + line = f.Line + } + } else { + file = "???" + line = 1 + } + + kvList := make([]interface{}, 0, 2*record.NumAttrs()) + record.Attrs(func(attr slog.Attr) bool { + kvList = appendAttr(groups, kvList, attr) + return true + }) + + printWithInfos(file, line, now, nil, s, record.Message, kvList) + return nil +} + +func Attrs2KVList(groups string, attrs []slog.Attr) []interface{} { + kvList := make([]interface{}, 0, 2*len(attrs)) + for _, attr := range attrs { + kvList = appendAttr(groups, kvList, attr) + } + return kvList +} + +func appendAttr(groups string, kvList []interface{}, attr slog.Attr) []interface{} { + var key string + if groups != "" { + key = groups + "." 
+ attr.Key + } else { + key = attr.Key + } + return append(kvList, key, attr.Value) +} diff --git a/vendor/k8s.io/klog/v2/k8s_references_slog.go b/vendor/k8s.io/klog/v2/k8s_references_slog.go new file mode 100644 index 0000000000..5522c84c77 --- /dev/null +++ b/vendor/k8s.io/klog/v2/k8s_references_slog.go @@ -0,0 +1,39 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package klog + +import ( + "log/slog" +) + +func (ref ObjectRef) LogValue() slog.Value { + if ref.Namespace != "" { + return slog.GroupValue(slog.String("name", ref.Name), slog.String("namespace", ref.Namespace)) + } + return slog.GroupValue(slog.String("name", ref.Name)) +} + +var _ slog.LogValuer = ObjectRef{} + +func (ks kobjSlice) LogValue() slog.Value { + return slog.AnyValue(ks.MarshalLog()) +} + +var _ slog.LogValuer = kobjSlice{} diff --git a/vendor/k8s.io/klog/v2/klog.go b/vendor/k8s.io/klog/v2/klog.go index 152f8a6bd6..72502db3ae 100644 --- a/vendor/k8s.io/klog/v2/klog.go +++ b/vendor/k8s.io/klog/v2/klog.go @@ -415,7 +415,7 @@ func init() { logging.stderrThreshold = severityValue{ Severity: severity.ErrorLog, // Default stderrThreshold is ERROR. } - commandLine.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr when writing to files and stderr (no effect when -logtostderr=true or -alsologtostderr=false)") + commandLine.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr when writing to files and stderr (no effect when -logtostderr=true or -alsologtostderr=true)") commandLine.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") commandLine.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace") @@ -518,9 +518,7 @@ type settings struct { func (s settings) deepCopy() settings { // vmodule is a slice and would be shared, so we have copy it. filter := make([]modulePat, len(s.vmodule.filter)) - for i := range s.vmodule.filter { - filter[i] = s.vmodule.filter[i] - } + copy(filter, s.vmodule.filter) s.vmodule.filter = filter if s.logger != nil { @@ -657,16 +655,15 @@ func (l *loggingT) header(s severity.Severity, depth int) (*buffer.Buffer, strin } } } - return l.formatHeader(s, file, line), file, line + return l.formatHeader(s, file, line, timeNow()), file, line } // formatHeader formats a log header using the provided file name and line number. 
-func (l *loggingT) formatHeader(s severity.Severity, file string, line int) *buffer.Buffer { +func (l *loggingT) formatHeader(s severity.Severity, file string, line int, now time.Time) *buffer.Buffer { buf := buffer.GetBuffer() if l.skipHeaders { return buf } - now := timeNow() buf.FormatHeader(s, file, line, now) return buf } @@ -676,6 +673,10 @@ func (l *loggingT) println(s severity.Severity, logger *logWriter, filter LogFil } func (l *loggingT) printlnDepth(s severity.Severity, logger *logWriter, filter LogFilter, depth int, args ...interface{}) { + if false { + _ = fmt.Sprintln(args...) // cause vet to treat this function like fmt.Println + } + buf, file, line := l.header(s, depth) // If a logger is set and doesn't support writing a formatted buffer, // we clear the generated header as we rely on the backing @@ -696,7 +697,15 @@ func (l *loggingT) print(s severity.Severity, logger *logWriter, filter LogFilte } func (l *loggingT) printDepth(s severity.Severity, logger *logWriter, filter LogFilter, depth int, args ...interface{}) { + if false { + _ = fmt.Sprint(args...) // // cause vet to treat this function like fmt.Print + } + buf, file, line := l.header(s, depth) + l.printWithInfos(buf, file, line, s, logger, filter, depth+1, args...) +} + +func (l *loggingT) printWithInfos(buf *buffer.Buffer, file string, line int, s severity.Severity, logger *logWriter, filter LogFilter, depth int, args ...interface{}) { // If a logger is set and doesn't support writing a formatted buffer, // we clear the generated header as we rely on the backing // logger implementation to print headers. @@ -719,6 +728,10 @@ func (l *loggingT) printf(s severity.Severity, logger *logWriter, filter LogFilt } func (l *loggingT) printfDepth(s severity.Severity, logger *logWriter, filter LogFilter, depth int, format string, args ...interface{}) { + if false { + _ = fmt.Sprintf(format, args...) // cause vet to treat this function like fmt.Printf + } + buf, file, line := l.header(s, depth) // If a logger is set and doesn't support writing a formatted buffer, // we clear the generated header as we rely on the backing @@ -741,7 +754,7 @@ func (l *loggingT) printfDepth(s severity.Severity, logger *logWriter, filter Lo // alsoLogToStderr is true, the log message always appears on standard error; it // will also appear in the log file unless --logtostderr is set. func (l *loggingT) printWithFileLine(s severity.Severity, logger *logWriter, filter LogFilter, file string, line int, alsoToStderr bool, args ...interface{}) { - buf := l.formatHeader(s, file, line) + buf := l.formatHeader(s, file, line, timeNow()) // If a logger is set and doesn't support writing a formatted buffer, // we clear the generated header as we rely on the backing // logger implementation to print headers. @@ -759,7 +772,7 @@ func (l *loggingT) printWithFileLine(s severity.Severity, logger *logWriter, fil l.output(s, logger, buf, 2 /* depth */, file, line, alsoToStderr) } -// if loggr is specified, will call loggr.Error, otherwise output with logging module. +// if logger is specified, will call logger.Error, otherwise output with logging module. func (l *loggingT) errorS(err error, logger *logWriter, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) { if filter != nil { msg, keysAndValues = filter.FilterS(msg, keysAndValues) @@ -771,7 +784,7 @@ func (l *loggingT) errorS(err error, logger *logWriter, filter LogFilter, depth l.printS(err, severity.ErrorLog, depth+1, msg, keysAndValues...) 
} -// if loggr is specified, will call loggr.Info, otherwise output with logging module. +// if logger is specified, will call logger.Info, otherwise output with logging module. func (l *loggingT) infoS(logger *logWriter, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) { if filter != nil { msg, keysAndValues = filter.FilterS(msg, keysAndValues) @@ -783,7 +796,7 @@ func (l *loggingT) infoS(logger *logWriter, filter LogFilter, depth int, msg str l.printS(nil, severity.InfoLog, depth+1, msg, keysAndValues...) } -// printS is called from infoS and errorS if loggr is not specified. +// printS is called from infoS and errorS if logger is not specified. // set log severity by s func (l *loggingT) printS(err error, s severity.Severity, depth int, msg string, keysAndValues ...interface{}) { // Only create a new buffer if we don't have one cached. @@ -796,7 +809,7 @@ func (l *loggingT) printS(err error, s severity.Severity, depth int, msg string, serialize.KVListFormat(&b.Buffer, "err", err) } serialize.KVListFormat(&b.Buffer, keysAndValues...) - l.printDepth(s, logging.logger, nil, depth+1, &b.Buffer) + l.printDepth(s, nil, nil, depth+1, &b.Buffer) // Make the buffer available for reuse. buffer.PutBuffer(b) } @@ -873,6 +886,9 @@ func (l *loggingT) output(s severity.Severity, logger *logWriter, buf *buffer.Bu if logger.writeKlogBuffer != nil { logger.writeKlogBuffer(data) } else { + if len(data) > 0 && data[len(data)-1] == '\n' { + data = data[:len(data)-1] + } // TODO: set 'severity' and caller information as structured log info // keysAndValues := []interface{}{"severity", severityName[s], "file", file, "line", line} if s == severity.ErrorLog { @@ -897,7 +913,7 @@ func (l *loggingT) output(s severity.Severity, logger *logWriter, buf *buffer.Bu l.exit(err) } } - l.file[severity.InfoLog].Write(data) + _, _ = l.file[severity.InfoLog].Write(data) } else { if l.file[s] == nil { if err := l.createFiles(s); err != nil { @@ -907,20 +923,20 @@ func (l *loggingT) output(s severity.Severity, logger *logWriter, buf *buffer.Bu } if l.oneOutput { - l.file[s].Write(data) + _, _ = l.file[s].Write(data) } else { switch s { case severity.FatalLog: - l.file[severity.FatalLog].Write(data) + _, _ = l.file[severity.FatalLog].Write(data) fallthrough case severity.ErrorLog: - l.file[severity.ErrorLog].Write(data) + _, _ = l.file[severity.ErrorLog].Write(data) fallthrough case severity.WarningLog: - l.file[severity.WarningLog].Write(data) + _, _ = l.file[severity.WarningLog].Write(data) fallthrough case severity.InfoLog: - l.file[severity.InfoLog].Write(data) + _, _ = l.file[severity.InfoLog].Write(data) } } } @@ -946,7 +962,7 @@ func (l *loggingT) output(s severity.Severity, logger *logWriter, buf *buffer.Bu logExitFunc = func(error) {} // If we get a write error, we'll still exit below. for log := severity.FatalLog; log >= severity.InfoLog; log-- { if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set. - f.Write(trace) + _, _ = f.Write(trace) } } l.mu.Unlock() @@ -1102,7 +1118,7 @@ const flushInterval = 5 * time.Second // flushDaemon periodically flushes the log file buffers. type flushDaemon struct { mu sync.Mutex - clock clock.WithTicker + clock clock.Clock flush func() stopC chan struct{} stopDone chan struct{} @@ -1110,7 +1126,7 @@ type flushDaemon struct { // newFlushDaemon returns a new flushDaemon. If the passed clock is nil, a // clock.RealClock is used. 
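The `if false { _ = fmt.Sprint... }` statements added to printlnDepth, printDepth, and printfDepth exist only so that `go vet` treats those helpers like the corresponding fmt functions, as the in-line comments say. A small sketch of the same pattern with a hypothetical wrapper (not klog code):

```go
package main

import "fmt"

// logf is a hypothetical printf-style wrapper. The dead branch is never
// executed, but it lets go vet's printf analysis recognize logf as a
// Printf-like function and flag mismatched format verbs at call sites.
func logf(format string, args ...interface{}) {
	if false {
		_ = fmt.Sprintf(format, args...)
	}
	fmt.Printf(format+"\n", args...)
}

func main() {
	logf("reconciled %d objects in %s", 3, "120ms")
}
```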
-func newFlushDaemon(flush func(), tickClock clock.WithTicker) *flushDaemon { +func newFlushDaemon(flush func(), tickClock clock.Clock) *flushDaemon { if tickClock == nil { tickClock = clock.RealClock{} } @@ -1201,8 +1217,8 @@ func (l *loggingT) flushAll() { for s := severity.FatalLog; s >= severity.InfoLog; s-- { file := l.file[s] if file != nil { - file.Flush() // ignore error - file.Sync() // ignore error + _ = file.Flush() // ignore error + _ = file.Sync() // ignore error } } if logging.loggerOptions.flush != nil { @@ -1281,9 +1297,7 @@ func (l *loggingT) setV(pc uintptr) Level { fn := runtime.FuncForPC(pc) file, _ := fn.FileLine(pc) // The file is something like /a/b/c/d.go. We want just the d. - if strings.HasSuffix(file, ".go") { - file = file[:len(file)-3] - } + file = strings.TrimSuffix(file, ".go") if slash := strings.LastIndex(file, "/"); slash >= 0 { file = file[slash+1:] } diff --git a/vendor/k8s.io/klog/v2/klog_file.go b/vendor/k8s.io/klog/v2/klog_file.go index 1025d644f3..8bee16204d 100644 --- a/vendor/k8s.io/klog/v2/klog_file.go +++ b/vendor/k8s.io/klog/v2/klog_file.go @@ -109,8 +109,8 @@ func create(tag string, t time.Time, startup bool) (f *os.File, filename string, f, err := openOrCreate(fname, startup) if err == nil { symlink := filepath.Join(dir, link) - os.Remove(symlink) // ignore err - os.Symlink(name, symlink) // ignore err + _ = os.Remove(symlink) // ignore err + _ = os.Symlink(name, symlink) // ignore err return f, fname, nil } lastErr = err diff --git a/vendor/k8s.io/klog/v2/klogr.go b/vendor/k8s.io/klog/v2/klogr.go index 15de00e21f..efec96fd45 100644 --- a/vendor/k8s.io/klog/v2/klogr.go +++ b/vendor/k8s.io/klog/v2/klogr.go @@ -22,6 +22,11 @@ import ( "k8s.io/klog/v2/internal/serialize" ) +const ( + // nameKey is used to log the `WithName` values as an additional attribute. + nameKey = "logger" +) + // NewKlogr returns a logger that is functionally identical to // klogr.NewWithOptions(klogr.FormatKlog), i.e. it passes through to klog. The // difference is that it uses a simpler implementation. @@ -32,10 +37,15 @@ func NewKlogr() Logger { // klogger is a subset of klogr/klogr.go. It had to be copied to break an // import cycle (klogr wants to use klog, and klog wants to use klogr). type klogger struct { - level int callDepth int - prefix string - values []interface{} + + // hasPrefix is true if the first entry in values is the special + // nameKey key/value. Such an entry gets added and later updated in + // WithName. + hasPrefix bool + + values []interface{} + groups string } func (l *klogger) Init(info logr.RuntimeInfo) { @@ -44,34 +54,40 @@ func (l *klogger) Init(info logr.RuntimeInfo) { func (l *klogger) Info(level int, msg string, kvList ...interface{}) { merged := serialize.MergeKVs(l.values, kvList) - if l.prefix != "" { - msg = l.prefix + ": " + msg - } // Skip this function. VDepth(l.callDepth+1, Level(level)).InfoSDepth(l.callDepth+1, msg, merged...) } func (l *klogger) Enabled(level int) bool { - // Skip this function and logr.Logger.Info where Enabled is called. - return VDepth(l.callDepth+2, Level(level)).Enabled() + return VDepth(l.callDepth+1, Level(level)).Enabled() } func (l *klogger) Error(err error, msg string, kvList ...interface{}) { merged := serialize.MergeKVs(l.values, kvList) - if l.prefix != "" { - msg = l.prefix + ": " + msg - } ErrorSDepth(l.callDepth+1, err, msg, merged...) } // WithName returns a new logr.Logger with the specified name appended. klogr -// uses '/' characters to separate name elements. 
Callers should not pass '/' +// uses '.' characters to separate name elements. Callers should not pass '.' // in the provided name string, but this library does not actually enforce that. func (l klogger) WithName(name string) logr.LogSink { - if len(l.prefix) > 0 { - l.prefix = l.prefix + "/" + if l.hasPrefix { + // Copy slice and modify value. No length checks and type + // assertions are needed because hasPrefix is only true if the + // first two elements exist and are key/value strings. + v := make([]interface{}, 0, len(l.values)) + v = append(v, l.values...) + prefix, _ := v[1].(string) + v[1] = prefix + "." + name + l.values = v + } else { + // Preprend new key/value pair. + v := make([]interface{}, 0, 2+len(l.values)) + v = append(v, nameKey, name) + v = append(v, l.values...) + l.values = v + l.hasPrefix = true } - l.prefix += name return &l } diff --git a/vendor/k8s.io/klog/v2/klogr_slog.go b/vendor/k8s.io/klog/v2/klogr_slog.go new file mode 100644 index 0000000000..f7bf740306 --- /dev/null +++ b/vendor/k8s.io/klog/v2/klogr_slog.go @@ -0,0 +1,96 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package klog + +import ( + "context" + "log/slog" + "strconv" + "time" + + "github.com/go-logr/logr/slogr" + + "k8s.io/klog/v2/internal/buffer" + "k8s.io/klog/v2/internal/serialize" + "k8s.io/klog/v2/internal/severity" + "k8s.io/klog/v2/internal/sloghandler" +) + +func (l *klogger) Handle(ctx context.Context, record slog.Record) error { + if logging.logger != nil { + if slogSink, ok := logging.logger.GetSink().(slogr.SlogSink); ok { + // Let that logger do the work. + return slogSink.Handle(ctx, record) + } + } + + return sloghandler.Handle(ctx, record, l.groups, slogOutput) +} + +// slogOutput corresponds to several different functions in klog.go. +// It goes through some of the same checks and formatting steps before +// it ultimately converges by calling logging.printWithInfos. +func slogOutput(file string, line int, now time.Time, err error, s severity.Severity, msg string, kvList []interface{}) { + // See infoS. + if logging.logger != nil { + // Taking this path happens when klog has a logger installed + // as backend which doesn't support slog. Not good, we have to + // guess about the call depth and drop the actual location. + logger := logging.logger.WithCallDepth(2) + if s > severity.ErrorLog { + logger.Error(err, msg, kvList...) + } else { + logger.Info(msg, kvList...) + } + return + } + + // See printS. + b := buffer.GetBuffer() + b.WriteString(strconv.Quote(msg)) + if err != nil { + serialize.KVListFormat(&b.Buffer, "err", err) + } + serialize.KVListFormat(&b.Buffer, kvList...) + + // See print + header. 
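With this klogr change, WithName no longer builds a `parent/child: message` prefix; the accumulated name is carried as an ordinary key/value pair under the `logger` key, joined with dots. A hedged usage sketch; the exact output depends on the configured klog flags and backend:

```go
package main

import "k8s.io/klog/v2"

func main() {
	// Before this bump the line looked roughly like "controller/sync: starting".
	// With klog v2.110.1 the name travels as an attribute instead, e.g.
	//   ... "starting" logger="controller.sync" workers=2
	logger := klog.NewKlogr().WithName("controller").WithName("sync")
	logger.Info("starting", "workers", 2)
	klog.Flush()
}
```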
+ buf := logging.formatHeader(s, file, line, now) + logging.printWithInfos(buf, file, line, s, nil, nil, 0, &b.Buffer) + + buffer.PutBuffer(b) +} + +func (l *klogger) WithAttrs(attrs []slog.Attr) slogr.SlogSink { + clone := *l + clone.values = serialize.WithValues(l.values, sloghandler.Attrs2KVList(l.groups, attrs)) + return &clone +} + +func (l *klogger) WithGroup(name string) slogr.SlogSink { + clone := *l + if clone.groups != "" { + clone.groups += "." + name + } else { + clone.groups = name + } + return &clone +} + +var _ slogr.SlogSink = &klogger{} diff --git a/vendor/k8s.io/utils/integer/integer.go b/vendor/k8s.io/utils/integer/integer.go index e4e740cad4..e0811e8344 100644 --- a/vendor/k8s.io/utils/integer/integer.go +++ b/vendor/k8s.io/utils/integer/integer.go @@ -16,6 +16,8 @@ limitations under the License. package integer +import "math" + // IntMax returns the maximum of the params func IntMax(a, b int) int { if b > a { @@ -65,9 +67,7 @@ func Int64Min(a, b int64) int64 { } // RoundToInt32 rounds floats into integer numbers. +// Deprecated: use math.Round() and a cast directly. func RoundToInt32(a float64) int32 { - if a < 0 { - return int32(a - 0.5) - } - return int32(a + 0.5) + return int32(math.Round(a)) } diff --git a/vendor/modules.txt b/vendor/modules.txt index 78c62ab7e2..a287b7aba4 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -292,9 +292,10 @@ github.com/go-git/go-git/v5/utils/ioutil # github.com/go-ldap/ldap/v3 v3.4.3 ## explicit; go 1.13 github.com/go-ldap/ldap/v3 -# github.com/go-logr/logr v1.2.4 -## explicit; go 1.16 +# github.com/go-logr/logr v1.3.0 +## explicit; go 1.18 github.com/go-logr/logr +github.com/go-logr/logr/slogr # github.com/go-openapi/analysis v0.21.4 ## explicit; go 1.13 github.com/go-openapi/analysis @@ -558,7 +559,7 @@ github.com/opencontainers/runc/libcontainer/user # github.com/opencontainers/runtime-spec v1.1.0 ## explicit github.com/opencontainers/runtime-spec/specs-go -# github.com/openshift/api v0.0.0-20231010075512-1ccc6058c62d +# github.com/openshift/api v0.0.0-20231207204216-5efc6fca4b2d ## explicit; go 1.20 github.com/openshift/api github.com/openshift/api/annotations @@ -597,6 +598,7 @@ github.com/openshift/api/monitoring github.com/openshift/api/monitoring/v1alpha1 github.com/openshift/api/network github.com/openshift/api/network/v1 +github.com/openshift/api/network/v1alpha1 github.com/openshift/api/networkoperator github.com/openshift/api/networkoperator/v1 github.com/openshift/api/oauth @@ -910,7 +912,7 @@ go.starlark.net/resolve go.starlark.net/starlark go.starlark.net/starlarkstruct go.starlark.net/syntax -# golang.org/x/crypto v0.15.0 +# golang.org/x/crypto v0.16.0 ## explicit; go 1.18 golang.org/x/crypto/cast5 golang.org/x/crypto/ed25519 @@ -939,7 +941,7 @@ golang.org/x/exp/slices # golang.org/x/mod v0.13.0 ## explicit; go 1.18 golang.org/x/mod/semver -# golang.org/x/net v0.18.0 +# golang.org/x/net v0.19.0 ## explicit; go 1.18 golang.org/x/net/context golang.org/x/net/html @@ -957,14 +959,14 @@ golang.org/x/oauth2/internal # golang.org/x/sync v0.5.0 ## explicit; go 1.18 golang.org/x/sync/errgroup -# golang.org/x/sys v0.14.0 +# golang.org/x/sys v0.15.0 ## explicit; go 1.18 golang.org/x/sys/cpu golang.org/x/sys/execabs golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows -# golang.org/x/term v0.14.0 +# golang.org/x/term v0.15.0 ## explicit; go 1.18 golang.org/x/term # golang.org/x/text v0.14.0 @@ -1060,7 +1062,7 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# k8s.io/api 
v0.28.2 +# k8s.io/api v0.28.4 ## explicit; go 1.20 k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 @@ -1128,7 +1130,7 @@ k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1 -# k8s.io/apimachinery v0.28.2 +# k8s.io/apimachinery v0.28.4 ## explicit; go 1.20 k8s.io/apimachinery/pkg/api/apitesting k8s.io/apimachinery/pkg/api/equality @@ -1564,7 +1566,7 @@ k8s.io/component-base/version ## explicit; go 1.20 k8s.io/component-helpers/auth/rbac/reconciliation k8s.io/component-helpers/auth/rbac/validation -# k8s.io/klog/v2 v2.100.1 +# k8s.io/klog/v2 v2.110.1 ## explicit; go 1.13 k8s.io/klog/v2 k8s.io/klog/v2/internal/buffer @@ -1572,6 +1574,7 @@ k8s.io/klog/v2/internal/clock k8s.io/klog/v2/internal/dbg k8s.io/klog/v2/internal/serialize k8s.io/klog/v2/internal/severity +k8s.io/klog/v2/internal/sloghandler # k8s.io/kube-aggregator v0.28.2 ## explicit; go 1.20 k8s.io/kube-aggregator/pkg/apis/apiregistration @@ -1690,7 +1693,7 @@ k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1 # k8s.io/pod-security-admission v0.28.2 ## explicit; go 1.20 k8s.io/pod-security-admission/api -# k8s.io/utils v0.0.0-20230726121419-3b25d923346b +# k8s.io/utils v0.0.0-20231127182322-b307cd553661 ## explicit; go 1.18 k8s.io/utils/buffer k8s.io/utils/clock @@ -1801,7 +1804,7 @@ sigs.k8s.io/kustomize/kyaml/yaml/merge2 sigs.k8s.io/kustomize/kyaml/yaml/merge3 sigs.k8s.io/kustomize/kyaml/yaml/schema sigs.k8s.io/kustomize/kyaml/yaml/walk -# sigs.k8s.io/structured-merge-diff/v4 v4.2.3 +# sigs.k8s.io/structured-merge-diff/v4 v4.4.1 ## explicit; go 1.13 sigs.k8s.io/structured-merge-diff/v4/fieldpath sigs.k8s.io/structured-merge-diff/v4/merge diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/pathelementmap.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/pathelementmap.go index 9b14ca581b..41fc2474a4 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/pathelementmap.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/pathelementmap.go @@ -28,20 +28,15 @@ import ( // for PathElementSet and SetNodeMap, so we could probably share the // code. type PathElementValueMap struct { - members sortedPathElementValues + valueMap PathElementMap } func MakePathElementValueMap(size int) PathElementValueMap { return PathElementValueMap{ - members: make(sortedPathElementValues, 0, size), + valueMap: MakePathElementMap(size), } } -type pathElementValue struct { - PathElement PathElement - Value value.Value -} - type sortedPathElementValues []pathElementValue // Implement the sort interface; this would permit bulk creation, which would @@ -53,7 +48,40 @@ func (spev sortedPathElementValues) Less(i, j int) bool { func (spev sortedPathElementValues) Swap(i, j int) { spev[i], spev[j] = spev[j], spev[i] } // Insert adds the pathelement and associated value in the map. +// If insert is called twice with the same PathElement, the value is replaced. func (s *PathElementValueMap) Insert(pe PathElement, v value.Value) { + s.valueMap.Insert(pe, v) +} + +// Get retrieves the value associated with the given PathElement from the map. +// (nil, false) is returned if there is no such PathElement. 
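structured-merge-diff v4.4.1 reworks PathElementValueMap to wrap the new PathElementMap (PathElement to interface{}), and Insert now replaces the stored value when called again with the same PathElement instead of silently keeping the first one. A hedged usage sketch, assuming sigs.k8s.io/structured-merge-diff/v4 is importable; the field name is illustrative:

```go
package main

import (
	"fmt"

	"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
	"sigs.k8s.io/structured-merge-diff/v4/value"
)

func main() {
	name := "containers"
	pe := fieldpath.PathElement{FieldName: &name}

	m := fieldpath.MakePathElementValueMap(1)
	m.Insert(pe, value.NewValueInterface("first"))
	m.Insert(pe, value.NewValueInterface("second")) // same PathElement: the value is replaced

	if v, ok := m.Get(pe); ok {
		fmt.Println(v.Unstructured()) // second
	}
}
```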
+func (s *PathElementValueMap) Get(pe PathElement) (value.Value, bool) { + v, ok := s.valueMap.Get(pe) + if !ok { + return nil, false + } + return v.(value.Value), true +} + +// PathElementValueMap is a map from PathElement to interface{}. +type PathElementMap struct { + members sortedPathElementValues +} + +type pathElementValue struct { + PathElement PathElement + Value interface{} +} + +func MakePathElementMap(size int) PathElementMap { + return PathElementMap{ + members: make(sortedPathElementValues, 0, size), + } +} + +// Insert adds the pathelement and associated value in the map. +// If insert is called twice with the same PathElement, the value is replaced. +func (s *PathElementMap) Insert(pe PathElement, v interface{}) { loc := sort.Search(len(s.members), func(i int) bool { return !s.members[i].PathElement.Less(pe) }) @@ -62,6 +90,7 @@ func (s *PathElementValueMap) Insert(pe PathElement, v value.Value) { return } if s.members[loc].PathElement.Equals(pe) { + s.members[loc].Value = v return } s.members = append(s.members, pathElementValue{}) @@ -71,7 +100,7 @@ func (s *PathElementValueMap) Insert(pe PathElement, v value.Value) { // Get retrieves the value associated with the given PathElement from the map. // (nil, false) is returned if there is no such PathElement. -func (s *PathElementValueMap) Get(pe PathElement) (value.Value, bool) { +func (s *PathElementMap) Get(pe PathElement) (interface{}, bool) { loc := sort.Search(len(s.members), func(i int) bool { return !s.members[i].PathElement.Less(pe) }) diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/conflict.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/conflict.go index 75a492d8ea..f1aa258609 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/conflict.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/conflict.go @@ -112,7 +112,7 @@ func ConflictsFromManagers(sets fieldpath.ManagedFields) Conflicts { set.Set().Iterate(func(p fieldpath.Path) { conflicts = append(conflicts, Conflict{ Manager: manager, - Path: p, + Path: p.Copy(), }) }) } diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/update.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/update.go index 1b23dcbd5e..d5a977d607 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/update.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/update.go @@ -18,6 +18,7 @@ import ( "sigs.k8s.io/structured-merge-diff/v4/fieldpath" "sigs.k8s.io/structured-merge-diff/v4/typed" + "sigs.k8s.io/structured-merge-diff/v4/value" ) // Converter is an interface to the conversion logic. The converter @@ -27,19 +28,39 @@ type Converter interface { IsMissingVersionError(error) bool } -// Updater is the object used to compute updated FieldSets and also -// merge the object on Apply. -type Updater struct { +// UpdateBuilder allows you to create a new Updater by exposing all of +// the options and setting them once. +type UpdaterBuilder struct { Converter Converter IgnoredFields map[fieldpath.APIVersion]*fieldpath.Set - enableUnions bool + // Stop comparing the new object with old object after applying. + // This was initially used to avoid spurious etcd update, but + // since that's vastly inefficient, we've come-up with a better + // way of doing that. Create this flag to stop it. + // Comparing has become more expensive too now that we're not using + // `Compare` but `value.Equals` so this gives an option to avoid it. + ReturnInputOnNoop bool } -// EnableUnionFeature turns on union handling. 
It is disabled by default until the -// feature is complete. -func (s *Updater) EnableUnionFeature() { - s.enableUnions = true +func (u *UpdaterBuilder) BuildUpdater() *Updater { + return &Updater{ + Converter: u.Converter, + IgnoredFields: u.IgnoredFields, + returnInputOnNoop: u.ReturnInputOnNoop, + } +} + +// Updater is the object used to compute updated FieldSets and also +// merge the object on Apply. +type Updater struct { + // Deprecated: This will eventually become private. + Converter Converter + + // Deprecated: This will eventually become private. + IgnoredFields map[fieldpath.APIVersion]*fieldpath.Set + + returnInputOnNoop bool } func (s *Updater) update(oldObject, newObject *typed.TypedValue, version fieldpath.APIVersion, managers fieldpath.ManagedFields, workflow string, force bool) (fieldpath.ManagedFields, *typed.Comparison, error) { @@ -126,12 +147,6 @@ func (s *Updater) Update(liveObject, newObject *typed.TypedValue, version fieldp if err != nil { return nil, fieldpath.ManagedFields{}, err } - if s.enableUnions { - newObject, err = liveObject.NormalizeUnions(newObject) - if err != nil { - return nil, fieldpath.ManagedFields{}, err - } - } managers, compare, err := s.update(liveObject, newObject, version, managers, manager, true) if err != nil { return nil, fieldpath.ManagedFields{}, err @@ -145,7 +160,7 @@ func (s *Updater) Update(liveObject, newObject *typed.TypedValue, version fieldp ignored = fieldpath.NewSet() } managers[manager] = fieldpath.NewVersionedSet( - managers[manager].Set().Union(compare.Modified).Union(compare.Added).Difference(compare.Removed).RecursiveDifference(ignored), + managers[manager].Set().Difference(compare.Removed).Union(compare.Modified).Union(compare.Added).RecursiveDifference(ignored), version, false, ) @@ -157,30 +172,17 @@ func (s *Updater) Update(liveObject, newObject *typed.TypedValue, version fieldp // Apply should be called when Apply is run, given the current object as // well as the configuration that is applied. This will merge the object -// and return it. If the object hasn't changed, nil is returned (the -// managers can still have changed though). +// and return it. 
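Updater construction now goes through UpdaterBuilder: Converter and IgnoredFields stay exported but are marked deprecated, the experimental union handling is removed, and ReturnInputOnNoop decides whether Apply still returns the merged object when nothing changed. A compile-only sketch of the new construction path; the converter parameter is a placeholder for a real merge.Converter implementation:

```go
package smdexample

import (
	"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
	"sigs.k8s.io/structured-merge-diff/v4/merge"
)

// newUpdater shows the builder-based construction introduced in v4.4.1.
func newUpdater(c merge.Converter) *merge.Updater {
	b := &merge.UpdaterBuilder{
		Converter:     c,
		IgnoredFields: map[fieldpath.APIVersion]*fieldpath.Set{},
		// false keeps the long-standing behaviour where Apply returns a nil
		// object when the applied configuration changes nothing.
		ReturnInputOnNoop: false,
	}
	return b.BuildUpdater()
}
```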
func (s *Updater) Apply(liveObject, configObject *typed.TypedValue, version fieldpath.APIVersion, managers fieldpath.ManagedFields, manager string, force bool) (*typed.TypedValue, fieldpath.ManagedFields, error) { var err error managers, err = s.reconcileManagedFieldsWithSchemaChanges(liveObject, managers) if err != nil { return nil, fieldpath.ManagedFields{}, err } - if s.enableUnions { - configObject, err = configObject.NormalizeUnionsApply(configObject) - if err != nil { - return nil, fieldpath.ManagedFields{}, err - } - } newObject, err := liveObject.Merge(configObject) if err != nil { return nil, fieldpath.ManagedFields{}, fmt.Errorf("failed to merge config: %v", err) } - if s.enableUnions { - newObject, err = configObject.NormalizeUnionsApply(newObject) - if err != nil { - return nil, fieldpath.ManagedFields{}, err - } - } lastSet := managers[manager] set, err := configObject.ToFieldSet() if err != nil { @@ -200,11 +202,11 @@ func (s *Updater) Apply(liveObject, configObject *typed.TypedValue, version fiel if err != nil { return nil, fieldpath.ManagedFields{}, fmt.Errorf("failed to prune fields: %v", err) } - managers, compare, err := s.update(liveObject, newObject, version, managers, manager, force) + managers, _, err = s.update(liveObject, newObject, version, managers, manager, force) if err != nil { return nil, fieldpath.ManagedFields{}, err } - if compare.IsSame() { + if !s.returnInputOnNoop && value.EqualsUsing(value.NewFreelistAllocator(), liveObject.AsValue(), newObject.AsValue()) { newObject = nil } return newObject, managers, nil @@ -218,7 +220,8 @@ func (s *Updater) prune(merged *typed.TypedValue, managers fieldpath.ManagedFiel if lastSet == nil || lastSet.Set().Empty() { return merged, nil } - convertedMerged, err := s.Converter.Convert(merged, lastSet.APIVersion()) + version := lastSet.APIVersion() + convertedMerged, err := s.Converter.Convert(merged, version) if err != nil { if s.Converter.IsMissingVersionError(err) { return merged, nil @@ -228,7 +231,7 @@ func (s *Updater) prune(merged *typed.TypedValue, managers fieldpath.ManagedFiel sc, tr := convertedMerged.Schema(), convertedMerged.TypeRef() pruned := convertedMerged.RemoveItems(lastSet.Set().EnsureNamedFieldsAreMembers(sc, tr)) - pruned, err = s.addBackOwnedItems(convertedMerged, pruned, managers, applyingManager) + pruned, err = s.addBackOwnedItems(convertedMerged, pruned, version, managers, applyingManager) if err != nil { return nil, fmt.Errorf("failed add back owned items: %v", err) } @@ -241,7 +244,7 @@ func (s *Updater) prune(merged *typed.TypedValue, managers fieldpath.ManagedFiel // addBackOwnedItems adds back any fields, list and map items that were removed by prune, // but other appliers or updaters (or the current applier's new config) claim to own. -func (s *Updater) addBackOwnedItems(merged, pruned *typed.TypedValue, managedFields fieldpath.ManagedFields, applyingManager string) (*typed.TypedValue, error) { +func (s *Updater) addBackOwnedItems(merged, pruned *typed.TypedValue, prunedVersion fieldpath.APIVersion, managedFields fieldpath.ManagedFields, applyingManager string) (*typed.TypedValue, error) { var err error managedAtVersion := map[fieldpath.APIVersion]*fieldpath.Set{} for _, managerSet := range managedFields { @@ -252,7 +255,6 @@ func (s *Updater) addBackOwnedItems(merged, pruned *typed.TypedValue, managedFie } // Add back owned items at pruned version first to avoid conversion failure // caused by pruned fields which are required for conversion. 
- prunedVersion := fieldpath.APIVersion(*pruned.TypeRef().NamedType) if managed, ok := managedAtVersion[prunedVersion]; ok { merged, pruned, err = s.addBackOwnedItemsForVersion(merged, pruned, prunedVersion, managed) if err != nil { diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/elements.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/elements.go index 7e5dc75827..5d3707a5b5 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/elements.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/elements.go @@ -73,7 +73,7 @@ type Atom struct { } // Scalar (AKA "primitive") represents a type which has a single value which is -// either numeric, string, or boolean. +// either numeric, string, or boolean, or untyped for any of them. // // TODO: split numeric into float/int? Something even more fine-grained? type Scalar string @@ -82,6 +82,7 @@ const ( Numeric = Scalar("numeric") String = Scalar("string") Boolean = Scalar("boolean") + Untyped = Scalar("untyped") ) // ElementRelationship is an enum of the different possible relationships diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/schemaschema.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/schemaschema.go index 7d64d1308c..6eb6c36df3 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/schemaschema.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/schemaschema.go @@ -110,7 +110,7 @@ var SchemaSchemaYAML = `types: scalar: string - name: deduceInvalidDiscriminator type: - scalar: bool + scalar: boolean - name: fields type: list: @@ -145,6 +145,7 @@ var SchemaSchemaYAML = `types: list: elementType: scalar: string + elementRelationship: atomic - name: untyped map: fields: diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/compare.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/compare.go new file mode 100644 index 0000000000..ed483cbbc4 --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/compare.go @@ -0,0 +1,460 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package typed + +import ( + "fmt" + "strings" + + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + "sigs.k8s.io/structured-merge-diff/v4/schema" + "sigs.k8s.io/structured-merge-diff/v4/value" +) + +// Comparison is the return value of a TypedValue.Compare() operation. +// +// No field will appear in more than one of the three fieldsets. If all of the +// fieldsets are empty, then the objects must have been equal. +type Comparison struct { + // Removed contains any fields removed by rhs (the right-hand-side + // object in the comparison). + Removed *fieldpath.Set + // Modified contains fields present in both objects but different. + Modified *fieldpath.Set + // Added contains any fields added by rhs. + Added *fieldpath.Set +} + +// IsSame returns true if the comparison returned no changes (the two +// compared objects are similar). 
+func (c *Comparison) IsSame() bool { + return c.Removed.Empty() && c.Modified.Empty() && c.Added.Empty() +} + +// String returns a human readable version of the comparison. +func (c *Comparison) String() string { + bld := strings.Builder{} + if !c.Modified.Empty() { + bld.WriteString(fmt.Sprintf("- Modified Fields:\n%v\n", c.Modified)) + } + if !c.Added.Empty() { + bld.WriteString(fmt.Sprintf("- Added Fields:\n%v\n", c.Added)) + } + if !c.Removed.Empty() { + bld.WriteString(fmt.Sprintf("- Removed Fields:\n%v\n", c.Removed)) + } + return bld.String() +} + +// ExcludeFields fields from the compare recursively removes the fields +// from the entire comparison +func (c *Comparison) ExcludeFields(fields *fieldpath.Set) *Comparison { + if fields == nil || fields.Empty() { + return c + } + c.Removed = c.Removed.RecursiveDifference(fields) + c.Modified = c.Modified.RecursiveDifference(fields) + c.Added = c.Added.RecursiveDifference(fields) + return c +} + +type compareWalker struct { + lhs value.Value + rhs value.Value + schema *schema.Schema + typeRef schema.TypeRef + + // Current path that we are comparing + path fieldpath.Path + + // Resulting comparison. + comparison *Comparison + + // internal housekeeping--don't set when constructing. + inLeaf bool // Set to true if we're in a "big leaf"--atomic map/list + + // Allocate only as many walkers as needed for the depth by storing them here. + spareWalkers *[]*compareWalker + + allocator value.Allocator +} + +// compare compares stuff. +func (w *compareWalker) compare(prefixFn func() string) (errs ValidationErrors) { + if w.lhs == nil && w.rhs == nil { + // check this condidition here instead of everywhere below. + return errorf("at least one of lhs and rhs must be provided") + } + a, ok := w.schema.Resolve(w.typeRef) + if !ok { + return errorf("schema error: no type found matching: %v", *w.typeRef.NamedType) + } + + alhs := deduceAtom(a, w.lhs) + arhs := deduceAtom(a, w.rhs) + + // deduceAtom does not fix the type for nil values + // nil is a wildcard and will accept whatever form the other operand takes + if w.rhs == nil { + errs = append(errs, handleAtom(alhs, w.typeRef, w)...) + } else if w.lhs == nil || alhs.Equals(&arhs) { + errs = append(errs, handleAtom(arhs, w.typeRef, w)...) + } else { + w2 := *w + errs = append(errs, handleAtom(alhs, w.typeRef, &w2)...) + errs = append(errs, handleAtom(arhs, w.typeRef, w)...) + } + + if !w.inLeaf { + if w.lhs == nil { + w.comparison.Added.Insert(w.path) + } else if w.rhs == nil { + w.comparison.Removed.Insert(w.path) + } + } + return errs.WithLazyPrefix(prefixFn) +} + +// doLeaf should be called on leaves before descending into children, if there +// will be a descent. It modifies w.inLeaf. +func (w *compareWalker) doLeaf() { + if w.inLeaf { + // We're in a "big leaf", an atomic map or list. Ignore + // subsequent leaves. + return + } + w.inLeaf = true + + // We don't recurse into leaf fields for merging. + if w.lhs == nil { + w.comparison.Added.Insert(w.path) + } else if w.rhs == nil { + w.comparison.Removed.Insert(w.path) + } else if !value.EqualsUsing(w.allocator, w.rhs, w.lhs) { + // TODO: Equality is not sufficient for this. + // Need to implement equality check on the value type. + w.comparison.Modified.Insert(w.path) + } +} + +func (w *compareWalker) doScalar(t *schema.Scalar) ValidationErrors { + // Make sure at least one side is a valid scalar. 
+ lerrs := validateScalar(t, w.lhs, "lhs: ") + rerrs := validateScalar(t, w.rhs, "rhs: ") + if len(lerrs) > 0 && len(rerrs) > 0 { + return append(lerrs, rerrs...) + } + + // All scalars are leaf fields. + w.doLeaf() + + return nil +} + +func (w *compareWalker) prepareDescent(pe fieldpath.PathElement, tr schema.TypeRef, cmp *Comparison) *compareWalker { + if w.spareWalkers == nil { + // first descent. + w.spareWalkers = &[]*compareWalker{} + } + var w2 *compareWalker + if n := len(*w.spareWalkers); n > 0 { + w2, *w.spareWalkers = (*w.spareWalkers)[n-1], (*w.spareWalkers)[:n-1] + } else { + w2 = &compareWalker{} + } + *w2 = *w + w2.typeRef = tr + w2.path = append(w2.path, pe) + w2.lhs = nil + w2.rhs = nil + w2.comparison = cmp + return w2 +} + +func (w *compareWalker) finishDescent(w2 *compareWalker) { + // if the descent caused a realloc, ensure that we reuse the buffer + // for the next sibling. + w.path = w2.path[:len(w2.path)-1] + *w.spareWalkers = append(*w.spareWalkers, w2) +} + +func (w *compareWalker) derefMap(prefix string, v value.Value) (value.Map, ValidationErrors) { + if v == nil { + return nil, nil + } + m, err := mapValue(w.allocator, v) + if err != nil { + return nil, errorf("%v: %v", prefix, err) + } + return m, nil +} + +func (w *compareWalker) visitListItems(t *schema.List, lhs, rhs value.List) (errs ValidationErrors) { + rLen := 0 + if rhs != nil { + rLen = rhs.Length() + } + lLen := 0 + if lhs != nil { + lLen = lhs.Length() + } + + maxLength := rLen + if lLen > maxLength { + maxLength = lLen + } + // Contains all the unique PEs between lhs and rhs, exactly once. + // Order doesn't matter since we're just tracking ownership in a set. + allPEs := make([]fieldpath.PathElement, 0, maxLength) + + // Gather all the elements from lhs, indexed by PE, in a list for duplicates. + lValues := fieldpath.MakePathElementMap(lLen) + for i := 0; i < lLen; i++ { + child := lhs.At(i) + pe, err := listItemToPathElement(w.allocator, w.schema, t, child) + if err != nil { + errs = append(errs, errorf("element %v: %v", i, err.Error())...) + // If we can't construct the path element, we can't + // even report errors deeper in the schema, so bail on + // this element. + continue + } + + if v, found := lValues.Get(pe); found { + list := v.([]value.Value) + lValues.Insert(pe, append(list, child)) + } else { + lValues.Insert(pe, []value.Value{child}) + allPEs = append(allPEs, pe) + } + } + + // Gather all the elements from rhs, indexed by PE, in a list for duplicates. + rValues := fieldpath.MakePathElementMap(rLen) + for i := 0; i < rLen; i++ { + rValue := rhs.At(i) + pe, err := listItemToPathElement(w.allocator, w.schema, t, rValue) + if err != nil { + errs = append(errs, errorf("element %v: %v", i, err.Error())...) + // If we can't construct the path element, we can't + // even report errors deeper in the schema, so bail on + // this element. + continue + } + if v, found := rValues.Get(pe); found { + list := v.([]value.Value) + rValues.Insert(pe, append(list, rValue)) + } else { + rValues.Insert(pe, []value.Value{rValue}) + if _, found := lValues.Get(pe); !found { + allPEs = append(allPEs, pe) + } + } + } + + for _, pe := range allPEs { + lList := []value.Value(nil) + if l, ok := lValues.Get(pe); ok { + lList = l.([]value.Value) + } + rList := []value.Value(nil) + if l, ok := rValues.Get(pe); ok { + rList = l.([]value.Value) + } + + switch { + case len(lList) == 0 && len(rList) == 0: + // We shouldn't be here anyway. 
+ return + // Normal use-case: + // We have no duplicates for this PE, compare items one-to-one. + case len(lList) <= 1 && len(rList) <= 1: + lValue := value.Value(nil) + if len(lList) != 0 { + lValue = lList[0] + } + rValue := value.Value(nil) + if len(rList) != 0 { + rValue = rList[0] + } + errs = append(errs, w.compareListItem(t, pe, lValue, rValue)...) + // Duplicates before & after use-case: + // Compare the duplicates lists as if they were atomic, mark modified if they changed. + case len(lList) >= 2 && len(rList) >= 2: + listEqual := func(lList, rList []value.Value) bool { + if len(lList) != len(rList) { + return false + } + for i := range lList { + if !value.Equals(lList[i], rList[i]) { + return false + } + } + return true + } + if !listEqual(lList, rList) { + w.comparison.Modified.Insert(append(w.path, pe)) + } + // Duplicates before & not anymore use-case: + // Rcursively add new non-duplicate items, Remove duplicate marker, + case len(lList) >= 2: + if len(rList) != 0 { + errs = append(errs, w.compareListItem(t, pe, nil, rList[0])...) + } + w.comparison.Removed.Insert(append(w.path, pe)) + // New duplicates use-case: + // Recursively remove old non-duplicate items, add duplicate marker. + case len(rList) >= 2: + if len(lList) != 0 { + errs = append(errs, w.compareListItem(t, pe, lList[0], nil)...) + } + w.comparison.Added.Insert(append(w.path, pe)) + } + } + + return +} + +func (w *compareWalker) indexListPathElements(t *schema.List, list value.List) ([]fieldpath.PathElement, fieldpath.PathElementValueMap, ValidationErrors) { + var errs ValidationErrors + length := 0 + if list != nil { + length = list.Length() + } + observed := fieldpath.MakePathElementValueMap(length) + pes := make([]fieldpath.PathElement, 0, length) + for i := 0; i < length; i++ { + child := list.At(i) + pe, err := listItemToPathElement(w.allocator, w.schema, t, child) + if err != nil { + errs = append(errs, errorf("element %v: %v", i, err.Error())...) + // If we can't construct the path element, we can't + // even report errors deeper in the schema, so bail on + // this element. + continue + } + // Ignore repeated occurences of `pe`. + if _, found := observed.Get(pe); found { + continue + } + observed.Insert(pe, child) + pes = append(pes, pe) + } + return pes, observed, errs +} + +func (w *compareWalker) compareListItem(t *schema.List, pe fieldpath.PathElement, lChild, rChild value.Value) ValidationErrors { + w2 := w.prepareDescent(pe, t.ElementType, w.comparison) + w2.lhs = lChild + w2.rhs = rChild + errs := w2.compare(pe.String) + w.finishDescent(w2) + return errs +} + +func (w *compareWalker) derefList(prefix string, v value.Value) (value.List, ValidationErrors) { + if v == nil { + return nil, nil + } + l, err := listValue(w.allocator, v) + if err != nil { + return nil, errorf("%v: %v", prefix, err) + } + return l, nil +} + +func (w *compareWalker) doList(t *schema.List) (errs ValidationErrors) { + lhs, _ := w.derefList("lhs: ", w.lhs) + if lhs != nil { + defer w.allocator.Free(lhs) + } + rhs, _ := w.derefList("rhs: ", w.rhs) + if rhs != nil { + defer w.allocator.Free(rhs) + } + + // If both lhs and rhs are empty/null, treat it as a + // leaf: this helps preserve the empty/null + // distinction. 
+ emptyPromoteToLeaf := (lhs == nil || lhs.Length() == 0) && (rhs == nil || rhs.Length() == 0) + + if t.ElementRelationship == schema.Atomic || emptyPromoteToLeaf { + w.doLeaf() + return nil + } + + if lhs == nil && rhs == nil { + return nil + } + + errs = w.visitListItems(t, lhs, rhs) + + return errs +} + +func (w *compareWalker) visitMapItem(t *schema.Map, out map[string]interface{}, key string, lhs, rhs value.Value) (errs ValidationErrors) { + fieldType := t.ElementType + if sf, ok := t.FindField(key); ok { + fieldType = sf.Type + } + pe := fieldpath.PathElement{FieldName: &key} + w2 := w.prepareDescent(pe, fieldType, w.comparison) + w2.lhs = lhs + w2.rhs = rhs + errs = append(errs, w2.compare(pe.String)...) + w.finishDescent(w2) + return errs +} + +func (w *compareWalker) visitMapItems(t *schema.Map, lhs, rhs value.Map) (errs ValidationErrors) { + out := map[string]interface{}{} + + value.MapZipUsing(w.allocator, lhs, rhs, value.Unordered, func(key string, lhsValue, rhsValue value.Value) bool { + errs = append(errs, w.visitMapItem(t, out, key, lhsValue, rhsValue)...) + return true + }) + + return errs +} + +func (w *compareWalker) doMap(t *schema.Map) (errs ValidationErrors) { + lhs, _ := w.derefMap("lhs: ", w.lhs) + if lhs != nil { + defer w.allocator.Free(lhs) + } + rhs, _ := w.derefMap("rhs: ", w.rhs) + if rhs != nil { + defer w.allocator.Free(rhs) + } + // If both lhs and rhs are empty/null, treat it as a + // leaf: this helps preserve the empty/null + // distinction. + emptyPromoteToLeaf := (lhs == nil || lhs.Empty()) && (rhs == nil || rhs.Empty()) + + if t.ElementRelationship == schema.Atomic || emptyPromoteToLeaf { + w.doLeaf() + return nil + } + + if lhs == nil && rhs == nil { + return nil + } + + errs = append(errs, w.visitMapItems(t, lhs, rhs)...) + + return errs +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/helpers.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/helpers.go index 19c77334f6..78fdb0e75f 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/helpers.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/helpers.go @@ -197,7 +197,7 @@ func getAssociativeKeyDefault(s *schema.Schema, list *schema.List, fieldName str return field.Default, nil } -func keyedAssociativeListItemToPathElement(a value.Allocator, s *schema.Schema, list *schema.List, index int, child value.Value) (fieldpath.PathElement, error) { +func keyedAssociativeListItemToPathElement(a value.Allocator, s *schema.Schema, list *schema.List, child value.Value) (fieldpath.PathElement, error) { pe := fieldpath.PathElement{} if child.IsNull() { // null entries are illegal. 
@@ -225,7 +225,7 @@ func keyedAssociativeListItemToPathElement(a value.Allocator, s *schema.Schema, return pe, nil } -func setItemToPathElement(list *schema.List, index int, child value.Value) (fieldpath.PathElement, error) { +func setItemToPathElement(child value.Value) (fieldpath.PathElement, error) { pe := fieldpath.PathElement{} switch { case child.IsMap(): @@ -245,16 +245,15 @@ func setItemToPathElement(list *schema.List, index int, child value.Value) (fiel } } -func listItemToPathElement(a value.Allocator, s *schema.Schema, list *schema.List, index int, child value.Value) (fieldpath.PathElement, error) { - if list.ElementRelationship == schema.Associative { - if len(list.Keys) > 0 { - return keyedAssociativeListItemToPathElement(a, s, list, index, child) - } +func listItemToPathElement(a value.Allocator, s *schema.Schema, list *schema.List, child value.Value) (fieldpath.PathElement, error) { + if list.ElementRelationship != schema.Associative { + return fieldpath.PathElement{}, errors.New("invalid indexing of non-associative list") + } - // If there's no keys, then we must be a set of primitives. - return setItemToPathElement(list, index, child) + if len(list.Keys) > 0 { + return keyedAssociativeListItemToPathElement(a, s, list, child) } - // Use the index as a key for atomic lists. - return fieldpath.PathElement{Index: &index}, nil + // If there's no keys, then we must be a set of primitives. + return setItemToPathElement(child) } diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/merge.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/merge.go index 9136440830..fa227ac405 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/merge.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/merge.go @@ -113,11 +113,12 @@ func (w *mergingWalker) doLeaf() { w.rule(w) } -func (w *mergingWalker) doScalar(t *schema.Scalar) (errs ValidationErrors) { - errs = append(errs, validateScalar(t, w.lhs, "lhs: ")...) - errs = append(errs, validateScalar(t, w.rhs, "rhs: ")...) - if len(errs) > 0 { - return errs +func (w *mergingWalker) doScalar(t *schema.Scalar) ValidationErrors { + // Make sure at least one side is a valid scalar. + lerrs := validateScalar(t, w.lhs, "lhs: ") + rerrs := validateScalar(t, w.rhs, "rhs: ") + if len(lerrs) > 0 && len(rerrs) > 0 { + return append(lerrs, rerrs...) } // All scalars are leaf fields. @@ -179,14 +180,18 @@ func (w *mergingWalker) visitListItems(t *schema.List, lhs, rhs value.List) (err } out := make([]interface{}, 0, outLen) - rhsOrder, observedRHS, rhsErrs := w.indexListPathElements(t, rhs) + rhsPEs, observedRHS, rhsErrs := w.indexListPathElements(t, rhs, false) errs = append(errs, rhsErrs...) - lhsOrder, observedLHS, lhsErrs := w.indexListPathElements(t, lhs) + lhsPEs, observedLHS, lhsErrs := w.indexListPathElements(t, lhs, true) errs = append(errs, lhsErrs...) 
+ if len(errs) != 0 { + return errs + } + sharedOrder := make([]*fieldpath.PathElement, 0, rLen) - for i := range rhsOrder { - pe := &rhsOrder[i] + for i := range rhsPEs { + pe := &rhsPEs[i] if _, ok := observedLHS.Get(*pe); ok { sharedOrder = append(sharedOrder, pe) } @@ -198,13 +203,15 @@ func (w *mergingWalker) visitListItems(t *schema.List, lhs, rhs value.List) (err sharedOrder = sharedOrder[1:] } - lLen, rLen = len(lhsOrder), len(rhsOrder) + mergedRHS := fieldpath.MakePathElementMap(len(rhsPEs)) + lLen, rLen = len(lhsPEs), len(rhsPEs) for lI, rI := 0, 0; lI < lLen || rI < rLen; { if lI < lLen && rI < rLen { - pe := lhsOrder[lI] - if pe.Equals(rhsOrder[rI]) { + pe := lhsPEs[lI] + if pe.Equals(rhsPEs[rI]) { // merge LHS & RHS items - lChild, _ := observedLHS.Get(pe) + mergedRHS.Insert(pe, struct{}{}) + lChild, _ := observedLHS.Get(pe) // may be nil if the PE is duplicaated. rChild, _ := observedRHS.Get(pe) mergeOut, errs := w.mergeListItem(t, pe, lChild, rChild) errs = append(errs, errs...) @@ -221,17 +228,17 @@ func (w *mergingWalker) visitListItems(t *schema.List, lhs, rhs value.List) (err } continue } - if _, ok := observedRHS.Get(pe); ok && nextShared != nil && !nextShared.Equals(lhsOrder[lI]) { + if _, ok := observedRHS.Get(pe); ok && nextShared != nil && !nextShared.Equals(lhsPEs[lI]) { // shared item, but not the one we want in this round lI++ continue } } if lI < lLen { - pe := lhsOrder[lI] + pe := lhsPEs[lI] if _, ok := observedRHS.Get(pe); !ok { - // take LHS item - lChild, _ := observedLHS.Get(pe) + // take LHS item using At to make sure we get the right item (observed may not contain the right item). + lChild := lhs.AtUsing(w.allocator, lI) mergeOut, errs := w.mergeListItem(t, pe, lChild, nil) errs = append(errs, errs...) if mergeOut != nil { @@ -239,12 +246,16 @@ func (w *mergingWalker) visitListItems(t *schema.List, lhs, rhs value.List) (err } lI++ continue + } else if _, ok := mergedRHS.Get(pe); ok { + // we've already merged it with RHS, we don't want to duplicate it, skip it. + lI++ } } if rI < rLen { // Take the RHS item, merge with matching LHS item if possible - pe := rhsOrder[rI] - lChild, _ := observedLHS.Get(pe) // may be nil + pe := rhsPEs[rI] + mergedRHS.Insert(pe, struct{}{}) + lChild, _ := observedLHS.Get(pe) // may be nil if absent or duplicaated. rChild, _ := observedRHS.Get(pe) mergeOut, errs := w.mergeListItem(t, pe, lChild, rChild) errs = append(errs, errs...) @@ -271,7 +282,7 @@ func (w *mergingWalker) visitListItems(t *schema.List, lhs, rhs value.List) (err return errs } -func (w *mergingWalker) indexListPathElements(t *schema.List, list value.List) ([]fieldpath.PathElement, fieldpath.PathElementValueMap, ValidationErrors) { +func (w *mergingWalker) indexListPathElements(t *schema.List, list value.List, allowDuplicates bool) ([]fieldpath.PathElement, fieldpath.PathElementValueMap, ValidationErrors) { var errs ValidationErrors length := 0 if list != nil { @@ -281,7 +292,7 @@ func (w *mergingWalker) indexListPathElements(t *schema.List, list value.List) ( pes := make([]fieldpath.PathElement, 0, length) for i := 0; i < length; i++ { child := list.At(i) - pe, err := listItemToPathElement(w.allocator, w.schema, t, i, child) + pe, err := listItemToPathElement(w.allocator, w.schema, t, child) if err != nil { errs = append(errs, errorf("element %v: %v", i, err.Error())...) // If we can't construct the path element, we can't @@ -289,11 +300,15 @@ func (w *mergingWalker) indexListPathElements(t *schema.List, list value.List) ( // this element. 
continue } - if _, found := observed.Get(pe); found { + if _, found := observed.Get(pe); found && !allowDuplicates { errs = append(errs, errorf("duplicate entries for key %v", pe.String())...) continue + } else if !found { + observed.Insert(pe, child) + } else { + // Duplicated items are not merged with the new value, make them nil. + observed.Insert(pe, value.NewValueInterface(nil)) } - observed.Insert(pe, child) pes = append(pes, pe) } return pes, observed, errs diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/parser.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/parser.go index 3949a78fc6..4258ee5bab 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/parser.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/parser.go @@ -93,13 +93,13 @@ func (p ParseableType) IsValid() bool { // FromYAML parses a yaml string into an object with the current schema // and the type "typename" or an error if validation fails. -func (p ParseableType) FromYAML(object YAMLObject) (*TypedValue, error) { +func (p ParseableType) FromYAML(object YAMLObject, opts ...ValidationOptions) (*TypedValue, error) { var v interface{} err := yaml.Unmarshal([]byte(object), &v) if err != nil { return nil, err } - return AsTyped(value.NewValueInterface(v), p.Schema, p.TypeRef) + return AsTyped(value.NewValueInterface(v), p.Schema, p.TypeRef, opts...) } // FromUnstructured converts a go "interface{}" type, typically an @@ -108,8 +108,8 @@ func (p ParseableType) FromYAML(object YAMLObject) (*TypedValue, error) { // The provided interface{} must be one of: map[string]interface{}, // map[interface{}]interface{}, []interface{}, int types, float types, // string or boolean. Nested interface{} must also be one of these types. -func (p ParseableType) FromUnstructured(in interface{}) (*TypedValue, error) { - return AsTyped(value.NewValueInterface(in), p.Schema, p.TypeRef) +func (p ParseableType) FromUnstructured(in interface{}, opts ...ValidationOptions) (*TypedValue, error) { + return AsTyped(value.NewValueInterface(in), p.Schema, p.TypeRef, opts...) } // FromStructured converts a go "interface{}" type, typically an structured object in @@ -117,12 +117,12 @@ func (p ParseableType) FromUnstructured(in interface{}) (*TypedValue, error) { // schema validation. The provided "interface{}" value must be a pointer so that the // value can be modified via reflection. The provided "interface{}" may contain structs // and types that are converted to Values by the jsonMarshaler interface. -func (p ParseableType) FromStructured(in interface{}) (*TypedValue, error) { +func (p ParseableType) FromStructured(in interface{}, opts ...ValidationOptions) (*TypedValue, error) { v, err := value.NewValueReflect(in) if err != nil { return nil, fmt.Errorf("error creating struct value reflector: %v", err) } - return AsTyped(v, p.Schema, p.TypeRef) + return AsTyped(v, p.Schema, p.TypeRef, opts...) 
} // DeducedParseableType is a ParseableType that deduces the type from diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/remove.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/remove.go index a338d761d4..ad071ee8f3 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/remove.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/remove.go @@ -74,9 +74,9 @@ func (w *removingWalker) doList(t *schema.List) (errs ValidationErrors) { iter := l.RangeUsing(w.allocator) defer w.allocator.Free(iter) for iter.Next() { - i, item := iter.Item() + _, item := iter.Item() // Ignore error because we have already validated this list - pe, _ := listItemToPathElement(w.allocator, w.schema, t, i, item) + pe, _ := listItemToPathElement(w.allocator, w.schema, t, item) path, _ := fieldpath.MakePath(pe) // save items on the path when we shouldExtract // but ignore them when we are removing (i.e. !w.shouldExtract) diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/tofieldset.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/tofieldset.go index 047efff053..d563a87ee6 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/tofieldset.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/tofieldset.go @@ -94,9 +94,31 @@ func (v *toFieldSetWalker) doScalar(t *schema.Scalar) ValidationErrors { } func (v *toFieldSetWalker) visitListItems(t *schema.List, list value.List) (errs ValidationErrors) { + // Keeps track of the PEs we've seen + seen := fieldpath.MakePathElementSet(list.Length()) + // Keeps tracks of the PEs we've counted as duplicates + duplicates := fieldpath.MakePathElementSet(list.Length()) for i := 0; i < list.Length(); i++ { child := list.At(i) - pe, _ := listItemToPathElement(v.allocator, v.schema, t, i, child) + pe, _ := listItemToPathElement(v.allocator, v.schema, t, child) + if seen.Has(pe) { + if duplicates.Has(pe) { + // do nothing + } else { + v.set.Insert(append(v.path, pe)) + duplicates.Insert(pe) + } + } else { + seen.Insert(pe) + } + } + + for i := 0; i < list.Length(); i++ { + child := list.At(i) + pe, _ := listItemToPathElement(v.allocator, v.schema, t, child) + if duplicates.Has(pe) { + continue + } v2 := v.prepareDescent(pe, t.ElementType) v2.value = child errs = append(errs, v2.toFieldSet()...) diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/typed.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/typed.go index d63a97fe20..9be9028280 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/typed.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/typed.go @@ -17,8 +17,6 @@ limitations under the License. package typed import ( - "fmt" - "strings" "sync" "sigs.k8s.io/structured-merge-diff/v4/fieldpath" @@ -26,16 +24,24 @@ import ( "sigs.k8s.io/structured-merge-diff/v4/value" ) +// ValidationOptions is the list of all the options available when running the validation. +type ValidationOptions int + +const ( + // AllowDuplicates means that sets and associative lists can have duplicate similar items. + AllowDuplicates ValidationOptions = iota +) + // AsTyped accepts a value and a type and returns a TypedValue. 'v' must have // type 'typeName' in the schema. An error is returned if the v doesn't conform // to the schema. 
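The new ValidationOptions/AllowDuplicates option threads through AsTyped and the parser entry points (FromYAML, FromUnstructured, FromStructured), so callers can accept sets and associative lists that contain duplicate keys instead of failing validation. A hedged end-to-end sketch with a hand-written schema; the type and field names are illustrative:

```go
package main

import (
	"fmt"

	"sigs.k8s.io/structured-merge-diff/v4/typed"
)

const schemaYAML = `types:
- name: example
  map:
    fields:
    - name: ports
      type:
        list:
          elementType:
            map:
              fields:
              - name: port
                type:
                  scalar: numeric
          elementRelationship: associative
          keys:
          - port
`

const objYAML = `ports:
- port: 80
- port: 80
`

func main() {
	parser, err := typed.NewParser(typed.YAMLObject(schemaYAML))
	if err != nil {
		panic(err)
	}
	pt := parser.Type("example")

	// Strict parsing still rejects duplicate keys in an associative list.
	if _, err := pt.FromYAML(typed.YAMLObject(objYAML)); err != nil {
		fmt.Println("strict:", err)
	}

	// With AllowDuplicates the same object validates.
	if _, err := pt.FromYAML(typed.YAMLObject(objYAML), typed.AllowDuplicates); err == nil {
		fmt.Println("AllowDuplicates: ok")
	}
}
```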
-func AsTyped(v value.Value, s *schema.Schema, typeRef schema.TypeRef) (*TypedValue, error) { +func AsTyped(v value.Value, s *schema.Schema, typeRef schema.TypeRef, opts ...ValidationOptions) (*TypedValue, error) { tv := &TypedValue{ value: v, typeRef: typeRef, schema: s, } - if err := tv.Validate(); err != nil { + if err := tv.Validate(opts...); err != nil { return nil, err } return tv, nil @@ -45,6 +51,10 @@ func AsTyped(v value.Value, s *schema.Schema, typeRef schema.TypeRef) (*TypedVal // conforms to the schema, for cases where that has already been checked or // where you're going to call a method that validates as a side-effect (like // ToFieldSet). +// +// Deprecated: This function was initially created because validation +// was expensive. Now that this has been solved, objects should always +// be created as validated, using `AsTyped`. func AsTypedUnvalidated(v value.Value, s *schema.Schema, typeRef schema.TypeRef) *TypedValue { tv := &TypedValue{ value: v, @@ -77,8 +87,14 @@ func (tv TypedValue) Schema() *schema.Schema { } // Validate returns an error with a list of every spec violation. -func (tv TypedValue) Validate() error { +func (tv TypedValue) Validate(opts ...ValidationOptions) error { w := tv.walker() + for _, opt := range opts { + switch opt { + case AllowDuplicates: + w.allowDuplicates = true + } + } defer w.finished() if errs := w.validate(nil); len(errs) != 0 { return errs @@ -113,6 +129,10 @@ func (tv TypedValue) Merge(pso *TypedValue) (*TypedValue, error) { return merge(&tv, pso, ruleKeepRHS, nil) } +var cmpwPool = sync.Pool{ + New: func() interface{} { return &compareWalker{} }, +} + // Compare compares the two objects. See the comments on the `Comparison` // struct for details on the return value. // @@ -120,33 +140,44 @@ func (tv TypedValue) Merge(pso *TypedValue) (*TypedValue, error) { // match), or an error will be returned. Validation errors will be returned if // the objects don't conform to the schema. func (tv TypedValue) Compare(rhs *TypedValue) (c *Comparison, err error) { - c = &Comparison{ + lhs := tv + if lhs.schema != rhs.schema { + return nil, errorf("expected objects with types from the same schema") + } + if !lhs.typeRef.Equals(&rhs.typeRef) { + return nil, errorf("expected objects of the same type, but got %v and %v", lhs.typeRef, rhs.typeRef) + } + + cmpw := cmpwPool.Get().(*compareWalker) + defer func() { + cmpw.lhs = nil + cmpw.rhs = nil + cmpw.schema = nil + cmpw.typeRef = schema.TypeRef{} + cmpw.comparison = nil + cmpw.inLeaf = false + + cmpwPool.Put(cmpw) + }() + + cmpw.lhs = lhs.value + cmpw.rhs = rhs.value + cmpw.schema = lhs.schema + cmpw.typeRef = lhs.typeRef + cmpw.comparison = &Comparison{ Removed: fieldpath.NewSet(), Modified: fieldpath.NewSet(), Added: fieldpath.NewSet(), } - _, err = merge(&tv, rhs, func(w *mergingWalker) { - if w.lhs == nil { - c.Added.Insert(w.path) - } else if w.rhs == nil { - c.Removed.Insert(w.path) - } else if !value.Equals(w.rhs, w.lhs) { - // TODO: Equality is not sufficient for this. - // Need to implement equality check on the value type. - c.Modified.Insert(w.path) - } - }, func(w *mergingWalker) { - if w.lhs == nil { - c.Added.Insert(w.path) - } else if w.rhs == nil { - c.Removed.Insert(w.path) - } - }) - if err != nil { - return nil, err + if cmpw.allocator == nil { + cmpw.allocator = value.NewFreelistAllocator() } - return c, nil + errs := cmpw.compare(nil) + if len(errs) > 0 { + return nil, errs + } + return cmpw.comparison, nil } // RemoveItems removes each provided list or map item from the value. 
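Compare is rewritten here to borrow a compareWalker from a sync.Pool and to reset every field before returning it, rather than building the comparison on top of the merge machinery. The pool-and-reset pattern in isolation, as a sketch (the walker type below is hypothetical, not the library's compareWalker):

package main

import (
	"fmt"
	"sync"
)

// walker stands in for a stateful helper that is expensive to allocate
// on every call, as compareWalker is in the diff above.
type walker struct {
	lhs, rhs interface{}
	result   []string
}

var walkerPool = sync.Pool{
	New: func() interface{} { return &walker{} },
}

func compare(lhs, rhs interface{}) []string {
	w := walkerPool.Get().(*walker)
	defer func() {
		// Zero every field before putting the walker back, so no caller's
		// data leaks into the next use of the pooled object.
		w.lhs, w.rhs, w.result = nil, nil, nil
		walkerPool.Put(w)
	}()

	w.lhs, w.rhs = lhs, rhs
	if w.lhs != w.rhs {
		w.result = append(w.result, "modified")
	}
	// Copy the result out: nothing owned by the walker may be retained past Put.
	return append([]string(nil), w.result...)
}

func main() {
	fmt.Println(compare(1, 2)) // [modified]
	fmt.Println(compare(3, 3)) // []
}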
@@ -161,63 +192,6 @@ func (tv TypedValue) ExtractItems(items *fieldpath.Set) *TypedValue { return &tv } -// NormalizeUnions takes the new object and normalizes the union: -// - If discriminator changed to non-nil, and a new field has been added -// that doesn't match, an error is returned, -// - If discriminator hasn't changed and two fields or more are set, an -// error is returned, -// - If discriminator changed to non-nil, all other fields but the -// discriminated one will be cleared, -// - Otherwise, If only one field is left, update discriminator to that value. -// -// Please note: union behavior isn't finalized yet and this is still experimental. -func (tv TypedValue) NormalizeUnions(new *TypedValue) (*TypedValue, error) { - var errs ValidationErrors - var normalizeFn = func(w *mergingWalker) { - if w.rhs != nil { - v := w.rhs.Unstructured() - w.out = &v - } - if err := normalizeUnions(w); err != nil { - errs = append(errs, errorf(err.Error())...) - } - } - out, mergeErrs := merge(&tv, new, func(w *mergingWalker) {}, normalizeFn) - if mergeErrs != nil { - errs = append(errs, mergeErrs.(ValidationErrors)...) - } - if len(errs) > 0 { - return nil, errs - } - return out, nil -} - -// NormalizeUnionsApply specifically normalize unions on apply. It -// validates that the applied union is correct (there should be no -// ambiguity there), and clear the fields according to the sent intent. -// -// Please note: union behavior isn't finalized yet and this is still experimental. -func (tv TypedValue) NormalizeUnionsApply(new *TypedValue) (*TypedValue, error) { - var errs ValidationErrors - var normalizeFn = func(w *mergingWalker) { - if w.rhs != nil { - v := w.rhs.Unstructured() - w.out = &v - } - if err := normalizeUnionsApply(w); err != nil { - errs = append(errs, errorf(err.Error())...) - } - } - out, mergeErrs := merge(&tv, new, func(w *mergingWalker) {}, normalizeFn) - if mergeErrs != nil { - errs = append(errs, mergeErrs.(ValidationErrors)...) - } - if len(errs) > 0 { - return nil, errs - } - return out, nil -} - func (tv TypedValue) Empty() *TypedValue { tv.value = value.NewValueInterface(nil) return &tv @@ -273,50 +247,3 @@ func merge(lhs, rhs *TypedValue, rule, postRule mergeRule) (*TypedValue, error) } return out, nil } - -// Comparison is the return value of a TypedValue.Compare() operation. -// -// No field will appear in more than one of the three fieldsets. If all of the -// fieldsets are empty, then the objects must have been equal. -type Comparison struct { - // Removed contains any fields removed by rhs (the right-hand-side - // object in the comparison). - Removed *fieldpath.Set - // Modified contains fields present in both objects but different. - Modified *fieldpath.Set - // Added contains any fields added by rhs. - Added *fieldpath.Set -} - -// IsSame returns true if the comparison returned no changes (the two -// compared objects are similar). -func (c *Comparison) IsSame() bool { - return c.Removed.Empty() && c.Modified.Empty() && c.Added.Empty() -} - -// String returns a human readable version of the comparison. 
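The Comparison type and its helpers (IsSame, String, ExcludeFields) are deleted from typed.go in this hunk; in the bumped library they appear to live alongside the new compare walker, and Compare above still returns a *Comparison. Assuming the exported surface is otherwise unchanged, a usage sketch with a hypothetical schema and objects:

package main

import (
	"fmt"

	"sigs.k8s.io/structured-merge-diff/v4/typed"
)

const schemaYAML = `types:
- name: config
  map:
    fields:
    - name: replicas
      type:
        scalar: numeric
    - name: image
      type:
        scalar: string
`

func main() {
	parser, err := typed.NewParser(schemaYAML)
	if err != nil {
		panic(err)
	}
	pt := parser.Type("config")

	lhs, err := pt.FromYAML(`{"replicas": 1, "image": "app:v1"}`)
	if err != nil {
		panic(err)
	}
	rhs, err := pt.FromYAML(`{"replicas": 2, "image": "app:v1"}`)
	if err != nil {
		panic(err)
	}

	// Both values come from the same parser, so they share a schema and type,
	// which Compare now requires explicitly.
	c, err := lhs.Compare(rhs)
	if err != nil {
		panic(err)
	}
	fmt.Println(c.IsSame()) // false: "replicas" changed
	fmt.Println(c.Modified) // field set containing the modified field
}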
-func (c *Comparison) String() string { - bld := strings.Builder{} - if !c.Modified.Empty() { - bld.WriteString(fmt.Sprintf("- Modified Fields:\n%v\n", c.Modified)) - } - if !c.Added.Empty() { - bld.WriteString(fmt.Sprintf("- Added Fields:\n%v\n", c.Added)) - } - if !c.Removed.Empty() { - bld.WriteString(fmt.Sprintf("- Removed Fields:\n%v\n", c.Removed)) - } - return bld.String() -} - -// ExcludeFields fields from the compare recursively removes the fields -// from the entire comparison -func (c *Comparison) ExcludeFields(fields *fieldpath.Set) *Comparison { - if fields == nil || fields.Empty() { - return c - } - c.Removed = c.Removed.RecursiveDifference(fields) - c.Modified = c.Modified.RecursiveDifference(fields) - c.Added = c.Added.RecursiveDifference(fields) - return c -} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/union.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/union.go deleted file mode 100644 index 1fa5d88ae6..0000000000 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/union.go +++ /dev/null @@ -1,276 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package typed - -import ( - "fmt" - "strings" - - "sigs.k8s.io/structured-merge-diff/v4/schema" - "sigs.k8s.io/structured-merge-diff/v4/value" -) - -func normalizeUnions(w *mergingWalker) error { - atom, found := w.schema.Resolve(w.typeRef) - if !found { - panic(fmt.Sprintf("Unable to resolve schema in normalize union: %v/%v", w.schema, w.typeRef)) - } - // Unions can only be in structures, and the struct must not have been removed - if atom.Map == nil || w.out == nil { - return nil - } - - var old value.Map - if w.lhs != nil && !w.lhs.IsNull() { - old = w.lhs.AsMap() - } - for _, union := range atom.Map.Unions { - if err := newUnion(&union).Normalize(old, w.rhs.AsMap(), value.NewValueInterface(*w.out).AsMap()); err != nil { - return err - } - } - return nil -} - -func normalizeUnionsApply(w *mergingWalker) error { - atom, found := w.schema.Resolve(w.typeRef) - if !found { - panic(fmt.Sprintf("Unable to resolve schema in normalize union: %v/%v", w.schema, w.typeRef)) - } - // Unions can only be in structures, and the struct must not have been removed - if atom.Map == nil || w.out == nil { - return nil - } - - var old value.Map - if w.lhs != nil && !w.lhs.IsNull() { - old = w.lhs.AsMap() - } - - for _, union := range atom.Map.Unions { - out := value.NewValueInterface(*w.out) - if err := newUnion(&union).NormalizeApply(old, w.rhs.AsMap(), out.AsMap()); err != nil { - return err - } - *w.out = out.Unstructured() - } - return nil -} - -type discriminated string -type field string - -type discriminatedNames struct { - f2d map[field]discriminated - d2f map[discriminated]field -} - -func newDiscriminatedName(f2d map[field]discriminated) discriminatedNames { - d2f := map[discriminated]field{} - for key, value := range f2d { - d2f[value] = key - } - return discriminatedNames{ - f2d: f2d, - d2f: d2f, - } -} - -func (dn discriminatedNames) toField(d discriminated) field { - if 
f, ok := dn.d2f[d]; ok { - return f - } - return field(d) -} - -func (dn discriminatedNames) toDiscriminated(f field) discriminated { - if d, ok := dn.f2d[f]; ok { - return d - } - return discriminated(f) -} - -type discriminator struct { - name string -} - -func (d *discriminator) Set(m value.Map, v discriminated) { - if d == nil { - return - } - m.Set(d.name, value.NewValueInterface(string(v))) -} - -func (d *discriminator) Get(m value.Map) discriminated { - if d == nil || m == nil { - return "" - } - val, ok := m.Get(d.name) - if !ok { - return "" - } - if !val.IsString() { - return "" - } - return discriminated(val.AsString()) -} - -type fieldsSet map[field]struct{} - -// newFieldsSet returns a map of the fields that are part of the union and are set -// in the given map. -func newFieldsSet(m value.Map, fields []field) fieldsSet { - if m == nil { - return nil - } - set := fieldsSet{} - for _, f := range fields { - if subField, ok := m.Get(string(f)); ok && !subField.IsNull() { - set.Add(f) - } - } - return set -} - -func (fs fieldsSet) Add(f field) { - if fs == nil { - fs = map[field]struct{}{} - } - fs[f] = struct{}{} -} - -func (fs fieldsSet) One() *field { - for f := range fs { - return &f - } - return nil -} - -func (fs fieldsSet) Has(f field) bool { - _, ok := fs[f] - return ok -} - -func (fs fieldsSet) List() []field { - fields := []field{} - for f := range fs { - fields = append(fields, f) - } - return fields -} - -func (fs fieldsSet) Difference(o fieldsSet) fieldsSet { - n := fieldsSet{} - for f := range fs { - if !o.Has(f) { - n.Add(f) - } - } - return n -} - -func (fs fieldsSet) String() string { - s := []string{} - for k := range fs { - s = append(s, string(k)) - } - return strings.Join(s, ", ") -} - -type union struct { - deduceInvalidDiscriminator bool - d *discriminator - dn discriminatedNames - f []field -} - -func newUnion(su *schema.Union) *union { - u := &union{} - if su.Discriminator != nil { - u.d = &discriminator{name: *su.Discriminator} - } - f2d := map[field]discriminated{} - for _, f := range su.Fields { - u.f = append(u.f, field(f.FieldName)) - f2d[field(f.FieldName)] = discriminated(f.DiscriminatorValue) - } - u.dn = newDiscriminatedName(f2d) - u.deduceInvalidDiscriminator = su.DeduceInvalidDiscriminator - return u -} - -// clear removes all the fields in map that are part of the union, but -// the one we decided to keep. -func (u *union) clear(m value.Map, f field) { - for _, fieldName := range u.f { - if field(fieldName) != f { - m.Delete(string(fieldName)) - } - } -} - -func (u *union) Normalize(old, new, out value.Map) error { - os := newFieldsSet(old, u.f) - ns := newFieldsSet(new, u.f) - diff := ns.Difference(os) - - if u.d.Get(old) != u.d.Get(new) && u.d.Get(new) != "" { - if len(diff) == 1 && u.d.Get(new) != u.dn.toDiscriminated(*diff.One()) { - return fmt.Errorf("discriminator (%v) and field changed (%v) don't match", u.d.Get(new), diff.One()) - } - if len(diff) > 1 { - return fmt.Errorf("multiple new fields added: %v", diff) - } - u.clear(out, u.dn.toField(u.d.Get(new))) - return nil - } - - if len(ns) > 1 { - return fmt.Errorf("multiple fields set without discriminator change: %v", ns) - } - - // Set discriminiator if it needs to be deduced. 
- if u.deduceInvalidDiscriminator && len(ns) == 1 { - u.d.Set(out, u.dn.toDiscriminated(*ns.One())) - } - - return nil -} - -func (u *union) NormalizeApply(applied, merged, out value.Map) error { - as := newFieldsSet(applied, u.f) - if len(as) > 1 { - return fmt.Errorf("more than one field of union applied: %v", as) - } - if len(as) == 0 { - // None is set, just leave. - return nil - } - // We have exactly one, discriminiator must match if set - if u.d.Get(applied) != "" && u.d.Get(applied) != u.dn.toDiscriminated(*as.One()) { - return fmt.Errorf("applied discriminator (%v) doesn't match applied field (%v)", u.d.Get(applied), *as.One()) - } - - // Update discriminiator if needed - if u.deduceInvalidDiscriminator { - u.d.Set(out, u.dn.toDiscriminated(*as.One())) - } - // Clear others fields. - u.clear(out, *as.One()) - - return nil -} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/validate.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/validate.go index 378d30219c..652e24c819 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/validate.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/validate.go @@ -33,6 +33,7 @@ func (tv TypedValue) walker() *validatingObjectWalker { v.value = tv.value v.schema = tv.schema v.typeRef = tv.typeRef + v.allowDuplicates = false if v.allocator == nil { v.allocator = value.NewFreelistAllocator() } @@ -49,6 +50,9 @@ type validatingObjectWalker struct { value value.Value schema *schema.Schema typeRef schema.TypeRef + // If set to true, duplicates will be allowed in + // associativeLists/sets. + allowDuplicates bool // Allocate only as many walkers as needed for the depth by storing them here. spareWalkers *[]*validatingObjectWalker @@ -102,6 +106,12 @@ func validateScalar(t *schema.Scalar, v value.Value, prefix string) (errs Valida if !v.IsBool() { return errorf("%vexpected boolean, got %v", prefix, v) } + case schema.Untyped: + if !v.IsFloat() && !v.IsInt() && !v.IsString() && !v.IsBool() { + return errorf("%vexpected any scalar, got %v", prefix, v) + } + default: + return errorf("%vunexpected scalar type in schema: %v", prefix, *t) } return nil } @@ -123,7 +133,7 @@ func (v *validatingObjectWalker) visitListItems(t *schema.List, list value.List) pe.Index = &i } else { var err error - pe, err = listItemToPathElement(v.allocator, v.schema, t, i, child) + pe, err = listItemToPathElement(v.allocator, v.schema, t, child) if err != nil { errs = append(errs, errorf("element %v: %v", i, err.Error())...) // If we can't construct the path element, we can't @@ -131,7 +141,7 @@ func (v *validatingObjectWalker) visitListItems(t *schema.List, list value.List) // this element. return } - if observedKeys.Has(pe) { + if observedKeys.Has(pe) && !v.allowDuplicates { errs = append(errs, errorf("duplicate entries for key %v", pe.String())...) 
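The validate.go hunks above add an allowDuplicates switch to the validating walker (plus an Untyped scalar case and a default error for unknown scalar kinds): a repeated key in an associative list is still detected, but only reported when duplicates are not allowed. The core check, reduced to a standalone sketch over string keys (a hypothetical helper, not the library's walker):

package main

import "fmt"

// checkKeys reports duplicate keys in order of appearance. When
// allowDuplicates is true the duplicates are tolerated, mirroring the
// `observedKeys.Has(pe) && !v.allowDuplicates` condition in the diff.
func checkKeys(keys []string, allowDuplicates bool) []error {
	var errs []error
	observed := map[string]bool{}
	for i, k := range keys {
		if observed[k] && !allowDuplicates {
			errs = append(errs, fmt.Errorf("element %d: duplicate entries for key %q", i, k))
		}
		observed[k] = true
	}
	return errs
}

func main() {
	keys := []string{"a", "b", "a"}
	fmt.Println(checkKeys(keys, false)) // [element 2: duplicate entries for key "a"]
	fmt.Println(checkKeys(keys, true))  // []
}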
} observedKeys.Insert(pe) diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/value/mapreflect.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/mapreflect.go index dc8b8c7200..c38402b99a 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/value/mapreflect.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/mapreflect.go @@ -136,7 +136,7 @@ func (r mapReflect) EqualsUsing(a Allocator, m Map) bool { if !ok { return false } - return Equals(vr.mustReuse(lhsVal, entry, nil, nil), value) + return EqualsUsing(a, vr.mustReuse(lhsVal, entry, nil, nil), value) }) } diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/value/mapunstructured.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/mapunstructured.go index d8e208628d..c3ae00b180 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/value/mapunstructured.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/mapunstructured.go @@ -88,12 +88,12 @@ func (m mapUnstructuredInterface) EqualsUsing(a Allocator, other Map) bool { } vv := a.allocValueUnstructured() defer a.Free(vv) - return other.Iterate(func(key string, value Value) bool { + return other.IterateUsing(a, func(key string, value Value) bool { lhsVal, ok := m[key] if !ok { return false } - return Equals(vv.reuse(lhsVal), value) + return EqualsUsing(a, vv.reuse(lhsVal), value) }) } @@ -168,12 +168,12 @@ func (m mapUnstructuredString) EqualsUsing(a Allocator, other Map) bool { } vv := a.allocValueUnstructured() defer a.Free(vv) - return other.Iterate(func(key string, value Value) bool { + return other.IterateUsing(a, func(key string, value Value) bool { lhsVal, ok := m[key] if !ok { return false } - return Equals(vv.reuse(lhsVal), value) + return EqualsUsing(a, vv.reuse(lhsVal), value) }) } diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/value/reflectcache.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/reflectcache.go index a5a467c0f0..f0d58d42cb 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/value/reflectcache.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/reflectcache.go @@ -154,7 +154,9 @@ func buildStructCacheEntry(t reflect.Type, infos map[string]*FieldCacheEntry, fi if field.Type.Kind() == reflect.Ptr { e = field.Type.Elem() } - buildStructCacheEntry(e, infos, append(fieldPath, field.Index)) + if e.Kind() == reflect.Struct { + buildStructCacheEntry(e, infos, append(fieldPath, field.Index)) + } continue } info := &FieldCacheEntry{JsonName: jsonName, isOmitEmpty: isOmitempty, fieldPath: append(fieldPath, field.Index), fieldType: field.Type}
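The value package changes above consistently pass the allocator down (IterateUsing/EqualsUsing instead of Iterate/Equals), so nested map and list comparisons keep reusing the same freelist allocator rather than falling back to fresh allocations; separately, reflectcache.go now only recurses into embedded fields that are actually structs when building the field cache. A caller-level sketch of the allocator-aware equality API, using exported functions that appear in the diff itself (the example values are hypothetical):

package main

import (
	"fmt"

	"sigs.k8s.io/structured-merge-diff/v4/value"
)

func main() {
	lhs := value.NewValueInterface(map[string]interface{}{
		"metadata": map[string]interface{}{"name": "demo"},
		"replicas": 1,
	})
	rhs := value.NewValueInterface(map[string]interface{}{
		"metadata": map[string]interface{}{"name": "demo"},
		"replicas": 1,
	})

	// Equals allocates internal scratch values on each call ...
	fmt.Println(value.Equals(lhs, rhs)) // true

	// ... while EqualsUsing reuses a freelist allocator, which is what the
	// vendored code now threads through nested map and list comparisons.
	a := value.NewFreelistAllocator()
	fmt.Println(value.EqualsUsing(a, lhs, rhs)) // true
}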