diff --git a/assets/core/0000_10_namespace-security-allocation-controller_sa.yaml b/assets/core/0000_10_namespace-security-allocation-controller_sa.yaml
new file mode 100644
index 0000000000..15d7b3717d
--- /dev/null
+++ b/assets/core/0000_10_namespace-security-allocation-controller_sa.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  namespace: openshift-infra
+  name: namespace-security-allocation-controller
diff --git a/assets/core/0000_50_cluster-openshift-cluster-policy-controller_00_namespace.yaml b/assets/core/0000_50_cluster-openshift-cluster-policy-controller_00_namespace.yaml
new file mode 100644
index 0000000000..65d3ecd21d
--- /dev/null
+++ b/assets/core/0000_50_cluster-openshift-cluster-policy-controller_00_namespace.yaml
@@ -0,0 +1,12 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+  annotations:
+    openshift.io/node-selector: ""
+    workload.openshift.io/allowed: "management"
+  labels:
+    # set value to avoid depending on kube admission that depends on openshift apis
+    openshift.io/run-level: "0"
+    # allow openshift-monitoring to look for ServiceMonitor objects in this namespace
+    openshift.io/cluster-monitoring: "true"
+  name: openshift-kube-controller-manager
diff --git a/assets/core/0000_80_cluster-openshift-cluster-policy-controller_service-account.yaml b/assets/core/0000_80_cluster-openshift-cluster-policy-controller_service-account.yaml
new file mode 100644
index 0000000000..e9735a2cc0
--- /dev/null
+++ b/assets/core/0000_80_cluster-openshift-cluster-policy-controller_service-account.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  namespace: openshift-kube-controller-manager
+  name: openshift-cluster-policy-controller-sa
diff --git a/assets/crd/0000_03_securityinternal-openshift_01_rangeallocation.crd.yaml b/assets/crd/0000_03_securityinternal-openshift_01_rangeallocation.crd.yaml
new file mode 100644
index 0000000000..5ae9859025
--- /dev/null
+++ b/assets/crd/0000_03_securityinternal-openshift_01_rangeallocation.crd.yaml
@@ -0,0 +1,49 @@
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    api-approved.openshift.io: https://github.com/openshift/api/pull/751
+    include.release.openshift.io/ibm-cloud-managed: "true"
+    include.release.openshift.io/self-managed-high-availability: "true"
+    include.release.openshift.io/single-node-developer: "true"
+  name: rangeallocations.security.internal.openshift.io
+spec:
+  group: security.internal.openshift.io
+  names:
+    kind: RangeAllocation
+    listKind: RangeAllocationList
+    plural: rangeallocations
+    singular: rangeallocation
+  scope: Cluster
+  versions:
+  - name: v1
+    schema:
+      openAPIV3Schema:
+        description: RangeAllocation is used so we can easily expose a RangeAllocation
+          typed for security group This is an internal API, not intended for external
+          consumption.
+        properties:
+          apiVersion:
+            description: 'APIVersion defines the versioned schema of this representation
+              of an object. Servers should convert recognized schemas to the latest
+              internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+            type: string
+          data:
+            description: data is a byte array representing the serialized state of
+              a range allocation. It is a bitmap with each bit set to one to represent
+              a range is taken.
+            type: string
+          kind:
+            description: 'Kind is a string value representing the REST resource this
+              object represents. Servers may infer this from the endpoint the client
+              submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+            type: string
+          metadata:
+            type: object
+          range:
+            description: range is a string representing a unique label for a range
+              of uids, "1000000000-2000000000/10000".
+            type: string
+        type: object
+    served: true
+    storage: true
diff --git a/assets/rbac/0000_10_cluster-policy-controller_clusterrole.yaml b/assets/rbac/0000_10_cluster-policy-controller_clusterrole.yaml
new file mode 100644
index 0000000000..d0fd3445dc
--- /dev/null
+++ b/assets/rbac/0000_10_cluster-policy-controller_clusterrole.yaml
@@ -0,0 +1,33 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  annotations:
+  name: namespace-security-allocation-controller
+rules:
+- apiGroups:
+  - security.openshift.io
+  - security.internal.openshift.io
+  resources:
+  - rangeallocations
+  verbs:
+  - create
+  - get
+  - update
+- apiGroups:
+  - ""
+  resources:
+  - namespaces
+  verbs:
+  - get
+  - list
+  - update
+  - watch
+  - patch
+- apiGroups:
+  - ""
+  resources:
+  - events
+  verbs:
+  - create
+  - patch
+  - update
diff --git a/assets/rbac/0000_10_cluster-policy-controller_clusterrolebinding.yaml b/assets/rbac/0000_10_cluster-policy-controller_clusterrolebinding.yaml
new file mode 100644
index 0000000000..ec9324bc6f
--- /dev/null
+++ b/assets/rbac/0000_10_cluster-policy-controller_clusterrolebinding.yaml
@@ -0,0 +1,12 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: namespace-security-allocation-controller
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: namespace-security-allocation-controller
+subjects:
+- kind: ServiceAccount
+  name: namespace-security-allocation-controller
+  namespace: openshift-infra
diff --git a/go.mod b/go.mod
index 5ab9cb7586..892bb202a6 100644
--- a/go.mod
+++ b/go.mod
@@ -267,7 +267,7 @@ replace (
 	github.com/openshift/apiserver-library-go => github.com/openshift/apiserver-library-go v0.0.0-20210721120111-70ce3cad7d84
 	github.com/openshift/build-machinery-go => github.com/openshift/build-machinery-go v0.0.0-20210209125900-0da259a2c359
 	github.com/openshift/client-go => github.com/openshift/client-go v0.0.0-20210422153130-25c8450d1535
-	github.com/openshift/library-go => github.com/openshift/library-go v0.0.0-20210407092538-7021fda6f427
+	github.com/openshift/library-go => github.com/openshift/library-go v0.0.0-20210720151324-cfbfc8feace0
 	github.com/pascaldekloe/goe => github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c
 	github.com/pelletier/go-toml => github.com/pelletier/go-toml v1.2.0
 	github.com/peterbourgon/diskv => github.com/peterbourgon/diskv v2.0.1+incompatible
@@ -401,7 +401,7 @@ replace (
 	k8s.io/kubelet => k8s.io/kubelet v0.21.0
 	k8s.io/kubernetes => github.com/openshift/kubernetes v0.0.0-20210918023457-a620f506e956
 	k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.21.0
-	k8s.io/metrics => k8s.io/metrics v0.21.0
+	k8s.io/metrics => k8s.io/metrics v0.21.2
 	k8s.io/mount-utils => k8s.io/mount-utils v0.21.0
 	k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.21.0
 	k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.21.0
@@ -441,30 +441,32 @@ require (
 	github.com/moby/sys/mount v0.2.0 // indirect
 	github.com/mohae/deepcopy v0.0.0-00010101000000-000000000000 // indirect
 	github.com/openshift/api v0.0.0-20210910062324-a41d3573a3ba
-	github.com/openshift/build-machinery-go v0.0.0-20210423112049-9415d7ebd33e
-	github.com/openshift/client-go v0.0.0-20210521082421-73d9475a9142
+	github.com/openshift/build-machinery-go v0.0.0-20210806203541-4ea9b6da3a37
+	github.com/openshift/client-go v0.0.0-20210831095141-e19a065e79f7
+	github.com/openshift/cluster-policy-controller v0.0.0-20210723200948-8fbffaf2b3c7
+	github.com/openshift/library-go v0.0.0-20210825122301-7f0bf922c345
 	github.com/openshift/oauth-apiserver v0.0.0-20210508031825-09435a5dd505
 	github.com/openshift/openshift-apiserver v0.0.0-20210812003746-628b46ccb012
 	github.com/openshift/openshift-controller-manager v0.0.0-20210609062424-2e25328c64ac
 	github.com/pkg/errors v0.9.1
 	github.com/sirupsen/logrus v1.8.1
-	github.com/spf13/cobra v1.1.1
+	github.com/spf13/cobra v1.1.3
 	github.com/spf13/pflag v1.0.5
 	github.com/urfave/negroni v0.0.0-00010101000000-000000000000 // indirect
 	go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489
 	gopkg.in/yaml.v2 v2.4.0
-	k8s.io/api v0.21.1
-	k8s.io/apiextensions-apiserver v0.21.0
-	k8s.io/apimachinery v0.21.1
-	k8s.io/apiserver v0.21.1
+	k8s.io/api v0.22.1
+	k8s.io/apiextensions-apiserver v0.22.1
+	k8s.io/apimachinery v0.22.1
+	k8s.io/apiserver v0.22.1
 	k8s.io/cli-runtime v0.21.0
-	k8s.io/client-go v0.21.1
-	k8s.io/component-base v0.21.1
-	k8s.io/controller-manager v0.21.0
-	k8s.io/klog/v2 v2.8.0
-	k8s.io/kube-aggregator v0.21.0
+	k8s.io/client-go v0.22.1
+	k8s.io/component-base v0.22.1
+	k8s.io/controller-manager v0.21.2
+	k8s.io/klog/v2 v2.9.0
+	k8s.io/kube-aggregator v0.22.1
 	k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7
 	k8s.io/kubectl v0.21.0
-	k8s.io/kubernetes v1.21.1
+	k8s.io/kubernetes v1.21.2
 	sigs.k8s.io/yaml v1.2.0
 )
diff --git a/go.sum b/go.sum
index 299ddede10..19229a33e1 100644
--- a/go.sum
+++ b/go.sum
@@ -421,6 +421,8 @@ github.com/openshift/build-machinery-go v0.0.0-20210209125900-0da259a2c359 h1:eh
 github.com/openshift/build-machinery-go v0.0.0-20210209125900-0da259a2c359/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE=
 github.com/openshift/client-go v0.0.0-20210422153130-25c8450d1535 h1:JGSJhDJiQxqUETyqseqeXD7X/hgA6V/F3WW/2dN4QCs=
 github.com/openshift/client-go v0.0.0-20210422153130-25c8450d1535/go.mod h1:v5/AYttPCjfqMGC1Ed/vutuDpuXmgWc5O+W9nwQ7EtE=
+github.com/openshift/cluster-policy-controller v0.0.0-20210723200948-8fbffaf2b3c7 h1:TGfOG8GvODRfk7n0nFTFt9apYkL0ke1i4FZmM2NJwhM=
+github.com/openshift/cluster-policy-controller v0.0.0-20210723200948-8fbffaf2b3c7/go.mod h1:/0PACanksGAKueGDm+5bkxiiYaVNlReEMTTVdyqXrZw=
 github.com/openshift/docker-distribution v0.0.0-20180925154709-d4c35485a70d h1:tupVADlF1SZrGy0Y0kg1FKUi2mVPzRwxVb+8LLMu8ws=
 github.com/openshift/docker-distribution v0.0.0-20180925154709-d4c35485a70d/go.mod h1:XmfFzbwryblvZ29NebonirM7RBuNEO7+yVCOapaouAk=
 github.com/openshift/etcd v0.0.0-20210524101026-aefa6bf59b38 h1:4qSF/Ggum/moP9j+QHHXdGgOOIhc7aaLKD3tQAahEic=
@@ -433,8 +435,8 @@ github.com/openshift/kubernetes v0.0.0-20210918023457-a620f506e956 h1:+Y0phJ87Xk
 github.com/openshift/kubernetes v0.0.0-20210918023457-a620f506e956/go.mod h1:L+xhAJq2OM55Nm0E4QjJngTH+BrUQmeC2jnl0jinPV0=
 github.com/openshift/kubernetes-apiserver v0.0.0-20210527175848-55ee66589915 h1:sFX4VkwXx4bv3YxceFO5PMtRbZxiVi5BiPDCcAN8GxE=
 github.com/openshift/kubernetes-apiserver v0.0.0-20210527175848-55ee66589915/go.mod h1:w2YSn4/WIwYuxG5zJmcqtRdtqgW/J2JRgFAqps3bBpg=
-github.com/openshift/library-go v0.0.0-20210407092538-7021fda6f427 h1:/6Xf107BJIzdfRe9xfuU4xnx7TUHQ7vzDMWiNYPmxfM=
-github.com/openshift/library-go v0.0.0-20210407092538-7021fda6f427/go.mod h1:pnz961veImKsbn7pQcuFbcVpCQosYiC1fUOjzEDeOLU=
+github.com/openshift/library-go v0.0.0-20210720151324-cfbfc8feace0 h1:uO9z0Yq3L87fGSuqI82VGwfma2dWOWo4J68PaDPxvDE=
+github.com/openshift/library-go v0.0.0-20210720151324-cfbfc8feace0/go.mod h1:rln3LbFNOpENSvhmsfH7g/hqc58IF78+o96yAAp5mq0=
 github.com/openshift/oauth-apiserver v0.0.0-20210508031825-09435a5dd505 h1:6THFBGtaiGS+7iQb+Q9j9tMe+ZnY9KStogtDQA6Xkl4=
 github.com/openshift/oauth-apiserver v0.0.0-20210508031825-09435a5dd505/go.mod h1:ktVSvRtD7UqyfbWlabNTLDnW92I2kBHzhUcugSdbcqo=
 github.com/openshift/openshift-apiserver v0.0.0-20210812003746-628b46ccb012 h1:K4PxZepDaMdtdFaQ1ToUgDFi9+hrjEpqUzpCqfNNyZc=
@@ -675,8 +677,8 @@ k8s.io/kubelet v0.21.0 h1:1VUfM5vKqLPlWFI0zee6fm9kwIZ/UEOGCodVFN+OZrg=
 k8s.io/kubelet v0.21.0/go.mod h1:G5ZxMTVev9t4bhmsSxDAWhH6wXDYEVHVVFyYsw4laR4=
 k8s.io/legacy-cloud-providers v0.21.0 h1:iWf5xaX9yvYT5mkz8UB96UtISQ5IkrWeuMPMhRp01ZY=
 k8s.io/legacy-cloud-providers v0.21.0/go.mod h1:bNxo7gDg+PGkBmT/MFZswLTWdSWK9kAlS1s8DJca5q4=
-k8s.io/metrics v0.21.0 h1:uwS3CgheLKaw3PTpwhjMswnm/PMqeLbdLH88VI7FMQQ=
-k8s.io/metrics v0.21.0/go.mod h1:L3Ji9EGPP1YBbfm9sPfEXSpnj8i24bfQbAFAsW0NueQ=
+k8s.io/metrics v0.21.2 h1:6ajprhWZnI64RSrNqET0cBdwzaxPxr9Vh8zURBkR1zY=
+k8s.io/metrics v0.21.2/go.mod h1:wzlOINZMCtWq8dR9gHlyaOemmYlOpAoldEIXE82gAhI=
 k8s.io/mount-utils v0.21.0 h1:Z8mCBpIBG26Q9TFg6d0Wvai6AL1mMPqSYBbNVxo6J2A=
 k8s.io/mount-utils v0.21.0/go.mod h1:dwXbIPxKtTjrBEaX1aK/CMEf1KZ8GzMHpe3NEBfdFXI=
 k8s.io/sample-apiserver v0.21.0/go.mod h1:yMffYq14yQZtuVPVBGaBJ+3Scb2xHT6QeqFfk3v+AEY=
diff --git a/pkg/assets/core/bindata.go b/pkg/assets/core/bindata.go
index 604800d503..b65566511f 100644
--- a/pkg/assets/core/bindata.go
+++ b/pkg/assets/core/bindata.go
@@ -2,6 +2,8 @@
 // sources:
 // assets/core/0000_00_flannel-configmap.yaml
 // assets/core/0000_00_flannel-service-account.yaml
+// assets/core/0000_10_namespace-security-allocation-controller_sa.yaml
+// assets/core/0000_50_cluster-openshift-cluster-policy-controller_00_namespace.yaml
 // assets/core/0000_50_cluster-openshift-controller-manager_00_namespace.yaml
 // assets/core/0000_60_service-ca_01_namespace.yaml
 // assets/core/0000_60_service-ca_04_sa.yaml
@@ -10,6 +12,7 @@
 // assets/core/0000_70_dns_01-dns-service-account.yaml
 // assets/core/0000_70_dns_01-node-resolver-service-account.yaml
 // assets/core/0000_70_dns_01-service.yaml
+// assets/core/0000_80_cluster-openshift-cluster-policy-controller_service-account.yaml
 // assets/core/0000_80_hostpath-provisioner-namespace.yaml
 // assets/core/0000_80_hostpath-provisioner-serviceaccount.yaml
 // assets/core/0000_80_openshift-router-cm.yaml
@@ -144,6 +147,57 @@ func assetsCore0000_00_flannelServiceAccountYaml() (*asset, error) {
 	return a, nil
 }
 
+var _assetsCore0000_10_namespaceSecurityAllocationController_saYaml = []byte(`apiVersion: v1
+kind: ServiceAccount
+metadata:
+  namespace: openshift-infra
+  name: namespace-security-allocation-controller
+`)
+
+func assetsCore0000_10_namespaceSecurityAllocationController_saYamlBytes() ([]byte, error) {
+	return _assetsCore0000_10_namespaceSecurityAllocationController_saYaml, nil
+}
+
+func assetsCore0000_10_namespaceSecurityAllocationController_saYaml() (*asset, error) {
+	bytes, err := assetsCore0000_10_namespaceSecurityAllocationController_saYamlBytes()
+	if err != nil {
+		return nil, err
+	}
+
+	info := bindataFileInfo{name: "assets/core/0000_10_namespace-security-allocation-controller_sa.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
+	a := &asset{bytes: bytes, info: info}
+	return a, nil
+}
+
+var _assetsCore0000_50_clusterOpenshiftClusterPolicyController_00_namespaceYaml = []byte(`apiVersion: v1
+kind: Namespace
+metadata:
+  annotations:
+    openshift.io/node-selector: ""
+    workload.openshift.io/allowed: "management"
+  labels:
+    # set value to avoid depending on kube admission that depends on openshift apis
+    openshift.io/run-level: "0"
+    # allow openshift-monitoring to look for ServiceMonitor objects in this namespace
+    openshift.io/cluster-monitoring: "true"
+  name: openshift-kube-controller-manager
+`)
+
+func assetsCore0000_50_clusterOpenshiftClusterPolicyController_00_namespaceYamlBytes() ([]byte, error) {
+	return _assetsCore0000_50_clusterOpenshiftClusterPolicyController_00_namespaceYaml, nil
+}
+
+func assetsCore0000_50_clusterOpenshiftClusterPolicyController_00_namespaceYaml() (*asset, error) {
+	bytes, err := assetsCore0000_50_clusterOpenshiftClusterPolicyController_00_namespaceYamlBytes()
+	if err != nil {
+		return nil, err
+	}
+
+	info := bindataFileInfo{name: "assets/core/0000_50_cluster-openshift-cluster-policy-controller_00_namespace.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
+	a := &asset{bytes: bytes, info: info}
+	return a, nil
+}
+
 var _assetsCore0000_50_clusterOpenshiftControllerManager_00_namespaceYaml = []byte(`apiVersion: v1
 kind: Namespace
 metadata:
@@ -382,6 +436,28 @@ func assetsCore0000_70_dns_01ServiceYaml() (*asset, error) {
 	return a, nil
 }
 
+var _assetsCore0000_80_clusterOpenshiftClusterPolicyController_serviceAccountYaml = []byte(`apiVersion: v1
+kind: ServiceAccount
+metadata:
+  namespace: openshift-kube-controller-manager
+  name: openshift-cluster-policy-controller-sa
+`)
+
+func assetsCore0000_80_clusterOpenshiftClusterPolicyController_serviceAccountYamlBytes() ([]byte, error) {
+	return _assetsCore0000_80_clusterOpenshiftClusterPolicyController_serviceAccountYaml, nil
+}
+
+func assetsCore0000_80_clusterOpenshiftClusterPolicyController_serviceAccountYaml() (*asset, error) {
+	bytes, err := assetsCore0000_80_clusterOpenshiftClusterPolicyController_serviceAccountYamlBytes()
+	if err != nil {
+		return nil, err
+	}
+
+	info := bindataFileInfo{name: "assets/core/0000_80_cluster-openshift-cluster-policy-controller_service-account.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
+	a := &asset{bytes: bytes, info: info}
+	return a, nil
+}
+
 var _assetsCore0000_80_hostpathProvisionerNamespaceYaml = []byte(`apiVersion: v1
 kind: Namespace
 metadata:
@@ -638,23 +714,26 @@ func AssetNames() []string {
 
 // _bindata is a table, holding each asset generator, mapped to its name.
 var _bindata = map[string]func() (*asset, error){
-	"assets/core/0000_00_flannel-configmap.yaml":                                 assetsCore0000_00_flannelConfigmapYaml,
-	"assets/core/0000_00_flannel-service-account.yaml":                           assetsCore0000_00_flannelServiceAccountYaml,
-	"assets/core/0000_50_cluster-openshift-controller-manager_00_namespace.yaml": assetsCore0000_50_clusterOpenshiftControllerManager_00_namespaceYaml,
-	"assets/core/0000_60_service-ca_01_namespace.yaml":                           assetsCore0000_60_serviceCa_01_namespaceYaml,
-	"assets/core/0000_60_service-ca_04_sa.yaml":                                  assetsCore0000_60_serviceCa_04_saYaml,
-	"assets/core/0000_70_dns_00-namespace.yaml":                                  assetsCore0000_70_dns_00NamespaceYaml,
-	"assets/core/0000_70_dns_01-configmap.yaml":                                  assetsCore0000_70_dns_01ConfigmapYaml,
-	"assets/core/0000_70_dns_01-dns-service-account.yaml":                        assetsCore0000_70_dns_01DnsServiceAccountYaml,
-	"assets/core/0000_70_dns_01-node-resolver-service-account.yaml":              assetsCore0000_70_dns_01NodeResolverServiceAccountYaml,
-	"assets/core/0000_70_dns_01-service.yaml":                                    assetsCore0000_70_dns_01ServiceYaml,
-	"assets/core/0000_80_hostpath-provisioner-namespace.yaml":                    assetsCore0000_80_hostpathProvisionerNamespaceYaml,
-	"assets/core/0000_80_hostpath-provisioner-serviceaccount.yaml":               assetsCore0000_80_hostpathProvisionerServiceaccountYaml,
-	"assets/core/0000_80_openshift-router-cm.yaml":                               assetsCore0000_80_openshiftRouterCmYaml,
-	"assets/core/0000_80_openshift-router-external-service.yaml":                 assetsCore0000_80_openshiftRouterExternalServiceYaml,
-	"assets/core/0000_80_openshift-router-namespace.yaml":                        assetsCore0000_80_openshiftRouterNamespaceYaml,
-	"assets/core/0000_80_openshift-router-service-account.yaml":                  assetsCore0000_80_openshiftRouterServiceAccountYaml,
-	"assets/core/0000_80_openshift-router-service.yaml":                          assetsCore0000_80_openshiftRouterServiceYaml,
+	"assets/core/0000_00_flannel-configmap.yaml":                                           assetsCore0000_00_flannelConfigmapYaml,
+	"assets/core/0000_00_flannel-service-account.yaml":                                     assetsCore0000_00_flannelServiceAccountYaml,
+	"assets/core/0000_10_namespace-security-allocation-controller_sa.yaml":                 assetsCore0000_10_namespaceSecurityAllocationController_saYaml,
+	"assets/core/0000_50_cluster-openshift-cluster-policy-controller_00_namespace.yaml":    assetsCore0000_50_clusterOpenshiftClusterPolicyController_00_namespaceYaml,
+	"assets/core/0000_50_cluster-openshift-controller-manager_00_namespace.yaml":           assetsCore0000_50_clusterOpenshiftControllerManager_00_namespaceYaml,
+	"assets/core/0000_60_service-ca_01_namespace.yaml":                                     assetsCore0000_60_serviceCa_01_namespaceYaml,
+	"assets/core/0000_60_service-ca_04_sa.yaml":                                            assetsCore0000_60_serviceCa_04_saYaml,
+	"assets/core/0000_70_dns_00-namespace.yaml":                                            assetsCore0000_70_dns_00NamespaceYaml,
+	"assets/core/0000_70_dns_01-configmap.yaml":                                            assetsCore0000_70_dns_01ConfigmapYaml,
+	"assets/core/0000_70_dns_01-dns-service-account.yaml":                                  assetsCore0000_70_dns_01DnsServiceAccountYaml,
+	"assets/core/0000_70_dns_01-node-resolver-service-account.yaml":                        assetsCore0000_70_dns_01NodeResolverServiceAccountYaml,
+	"assets/core/0000_70_dns_01-service.yaml":                                              assetsCore0000_70_dns_01ServiceYaml,
+	"assets/core/0000_80_cluster-openshift-cluster-policy-controller_service-account.yaml": assetsCore0000_80_clusterOpenshiftClusterPolicyController_serviceAccountYaml,
+	"assets/core/0000_80_hostpath-provisioner-namespace.yaml":                              assetsCore0000_80_hostpathProvisionerNamespaceYaml,
+	"assets/core/0000_80_hostpath-provisioner-serviceaccount.yaml":                         assetsCore0000_80_hostpathProvisionerServiceaccountYaml,
"assets/core/0000_80_openshift-router-cm.yaml": assetsCore0000_80_openshiftRouterCmYaml, + "assets/core/0000_80_openshift-router-external-service.yaml": assetsCore0000_80_openshiftRouterExternalServiceYaml, + "assets/core/0000_80_openshift-router-namespace.yaml": assetsCore0000_80_openshiftRouterNamespaceYaml, + "assets/core/0000_80_openshift-router-service-account.yaml": assetsCore0000_80_openshiftRouterServiceAccountYaml, + "assets/core/0000_80_openshift-router-service.yaml": assetsCore0000_80_openshiftRouterServiceYaml, } // AssetDir returns the file names below a certain @@ -700,23 +779,26 @@ type bintree struct { var _bintree = &bintree{nil, map[string]*bintree{ "assets": {nil, map[string]*bintree{ "core": {nil, map[string]*bintree{ - "0000_00_flannel-configmap.yaml": {assetsCore0000_00_flannelConfigmapYaml, map[string]*bintree{}}, - "0000_00_flannel-service-account.yaml": {assetsCore0000_00_flannelServiceAccountYaml, map[string]*bintree{}}, - "0000_50_cluster-openshift-controller-manager_00_namespace.yaml": {assetsCore0000_50_clusterOpenshiftControllerManager_00_namespaceYaml, map[string]*bintree{}}, - "0000_60_service-ca_01_namespace.yaml": {assetsCore0000_60_serviceCa_01_namespaceYaml, map[string]*bintree{}}, - "0000_60_service-ca_04_sa.yaml": {assetsCore0000_60_serviceCa_04_saYaml, map[string]*bintree{}}, - "0000_70_dns_00-namespace.yaml": {assetsCore0000_70_dns_00NamespaceYaml, map[string]*bintree{}}, - "0000_70_dns_01-configmap.yaml": {assetsCore0000_70_dns_01ConfigmapYaml, map[string]*bintree{}}, - "0000_70_dns_01-dns-service-account.yaml": {assetsCore0000_70_dns_01DnsServiceAccountYaml, map[string]*bintree{}}, - "0000_70_dns_01-node-resolver-service-account.yaml": {assetsCore0000_70_dns_01NodeResolverServiceAccountYaml, map[string]*bintree{}}, - "0000_70_dns_01-service.yaml": {assetsCore0000_70_dns_01ServiceYaml, map[string]*bintree{}}, - "0000_80_hostpath-provisioner-namespace.yaml": {assetsCore0000_80_hostpathProvisionerNamespaceYaml, map[string]*bintree{}}, - "0000_80_hostpath-provisioner-serviceaccount.yaml": {assetsCore0000_80_hostpathProvisionerServiceaccountYaml, map[string]*bintree{}}, - "0000_80_openshift-router-cm.yaml": {assetsCore0000_80_openshiftRouterCmYaml, map[string]*bintree{}}, - "0000_80_openshift-router-external-service.yaml": {assetsCore0000_80_openshiftRouterExternalServiceYaml, map[string]*bintree{}}, - "0000_80_openshift-router-namespace.yaml": {assetsCore0000_80_openshiftRouterNamespaceYaml, map[string]*bintree{}}, - "0000_80_openshift-router-service-account.yaml": {assetsCore0000_80_openshiftRouterServiceAccountYaml, map[string]*bintree{}}, - "0000_80_openshift-router-service.yaml": {assetsCore0000_80_openshiftRouterServiceYaml, map[string]*bintree{}}, + "0000_00_flannel-configmap.yaml": {assetsCore0000_00_flannelConfigmapYaml, map[string]*bintree{}}, + "0000_00_flannel-service-account.yaml": {assetsCore0000_00_flannelServiceAccountYaml, map[string]*bintree{}}, + "0000_10_namespace-security-allocation-controller_sa.yaml": {assetsCore0000_10_namespaceSecurityAllocationController_saYaml, map[string]*bintree{}}, + "0000_50_cluster-openshift-cluster-policy-controller_00_namespace.yaml": {assetsCore0000_50_clusterOpenshiftClusterPolicyController_00_namespaceYaml, map[string]*bintree{}}, + "0000_50_cluster-openshift-controller-manager_00_namespace.yaml": {assetsCore0000_50_clusterOpenshiftControllerManager_00_namespaceYaml, map[string]*bintree{}}, + "0000_60_service-ca_01_namespace.yaml": {assetsCore0000_60_serviceCa_01_namespaceYaml, 
-			"0000_60_service-ca_04_sa.yaml":                                  {assetsCore0000_60_serviceCa_04_saYaml, map[string]*bintree{}},
-			"0000_70_dns_00-namespace.yaml":                                  {assetsCore0000_70_dns_00NamespaceYaml, map[string]*bintree{}},
-			"0000_70_dns_01-configmap.yaml":                                  {assetsCore0000_70_dns_01ConfigmapYaml, map[string]*bintree{}},
-			"0000_70_dns_01-dns-service-account.yaml":                        {assetsCore0000_70_dns_01DnsServiceAccountYaml, map[string]*bintree{}},
-			"0000_70_dns_01-node-resolver-service-account.yaml":              {assetsCore0000_70_dns_01NodeResolverServiceAccountYaml, map[string]*bintree{}},
-			"0000_70_dns_01-service.yaml":                                    {assetsCore0000_70_dns_01ServiceYaml, map[string]*bintree{}},
-			"0000_80_hostpath-provisioner-namespace.yaml":                    {assetsCore0000_80_hostpathProvisionerNamespaceYaml, map[string]*bintree{}},
-			"0000_80_hostpath-provisioner-serviceaccount.yaml":               {assetsCore0000_80_hostpathProvisionerServiceaccountYaml, map[string]*bintree{}},
-			"0000_80_openshift-router-cm.yaml":                               {assetsCore0000_80_openshiftRouterCmYaml, map[string]*bintree{}},
-			"0000_80_openshift-router-external-service.yaml":                 {assetsCore0000_80_openshiftRouterExternalServiceYaml, map[string]*bintree{}},
-			"0000_80_openshift-router-namespace.yaml":                        {assetsCore0000_80_openshiftRouterNamespaceYaml, map[string]*bintree{}},
-			"0000_80_openshift-router-service-account.yaml":                  {assetsCore0000_80_openshiftRouterServiceAccountYaml, map[string]*bintree{}},
-			"0000_80_openshift-router-service.yaml":                          {assetsCore0000_80_openshiftRouterServiceYaml, map[string]*bintree{}},
+			"0000_00_flannel-configmap.yaml":                                           {assetsCore0000_00_flannelConfigmapYaml, map[string]*bintree{}},
+			"0000_00_flannel-service-account.yaml":                                     {assetsCore0000_00_flannelServiceAccountYaml, map[string]*bintree{}},
+			"0000_10_namespace-security-allocation-controller_sa.yaml":                 {assetsCore0000_10_namespaceSecurityAllocationController_saYaml, map[string]*bintree{}},
+			"0000_50_cluster-openshift-cluster-policy-controller_00_namespace.yaml":    {assetsCore0000_50_clusterOpenshiftClusterPolicyController_00_namespaceYaml, map[string]*bintree{}},
+			"0000_50_cluster-openshift-controller-manager_00_namespace.yaml":           {assetsCore0000_50_clusterOpenshiftControllerManager_00_namespaceYaml, map[string]*bintree{}},
+			"0000_60_service-ca_01_namespace.yaml":                                     {assetsCore0000_60_serviceCa_01_namespaceYaml, map[string]*bintree{}},
+			"0000_60_service-ca_04_sa.yaml":                                            {assetsCore0000_60_serviceCa_04_saYaml, map[string]*bintree{}},
+			"0000_70_dns_00-namespace.yaml":                                            {assetsCore0000_70_dns_00NamespaceYaml, map[string]*bintree{}},
+			"0000_70_dns_01-configmap.yaml":                                            {assetsCore0000_70_dns_01ConfigmapYaml, map[string]*bintree{}},
+			"0000_70_dns_01-dns-service-account.yaml":                                  {assetsCore0000_70_dns_01DnsServiceAccountYaml, map[string]*bintree{}},
+			"0000_70_dns_01-node-resolver-service-account.yaml":                        {assetsCore0000_70_dns_01NodeResolverServiceAccountYaml, map[string]*bintree{}},
+			"0000_70_dns_01-service.yaml":                                              {assetsCore0000_70_dns_01ServiceYaml, map[string]*bintree{}},
+			"0000_80_cluster-openshift-cluster-policy-controller_service-account.yaml": {assetsCore0000_80_clusterOpenshiftClusterPolicyController_serviceAccountYaml, map[string]*bintree{}},
+			"0000_80_hostpath-provisioner-namespace.yaml":                              {assetsCore0000_80_hostpathProvisionerNamespaceYaml, map[string]*bintree{}},
+			"0000_80_hostpath-provisioner-serviceaccount.yaml":                         {assetsCore0000_80_hostpathProvisionerServiceaccountYaml, map[string]*bintree{}},
+			"0000_80_openshift-router-cm.yaml":                                         {assetsCore0000_80_openshiftRouterCmYaml, map[string]*bintree{}},
+			"0000_80_openshift-router-external-service.yaml":                           {assetsCore0000_80_openshiftRouterExternalServiceYaml, map[string]*bintree{}},
+			"0000_80_openshift-router-namespace.yaml":                                  {assetsCore0000_80_openshiftRouterNamespaceYaml, map[string]*bintree{}},
+			"0000_80_openshift-router-service-account.yaml":                            {assetsCore0000_80_openshiftRouterServiceAccountYaml, map[string]*bintree{}},
+			"0000_80_openshift-router-service.yaml":                                    {assetsCore0000_80_openshiftRouterServiceYaml, map[string]*bintree{}},
 		}},
 	}},
 }}
diff --git a/pkg/assets/crd.go b/pkg/assets/crd.go
index 88f3072e4a..28d43f0e40 100755
--- a/pkg/assets/crd.go
+++ b/pkg/assets/crd.go
@@ -41,6 +41,7 @@ var (
 		"assets/crd/0000_10_config-operator_01_image.crd.yaml",
 		"assets/crd/0000_03_config-operator_01_proxy.crd.yaml",
 		"assets/crd/0000_03_quota-openshift_01_clusterresourcequota.crd.yaml",
+		"assets/crd/0000_03_securityinternal-openshift_01_rangeallocation.crd.yaml",
 	}
 )
 
diff --git a/pkg/assets/crd/bindata.go b/pkg/assets/crd/bindata.go
index fd84306756..e570a6b28f 100644
--- a/pkg/assets/crd/bindata.go
+++ b/pkg/assets/crd/bindata.go
@@ -4,6 +4,7 @@
 // assets/crd/0000_03_config-operator_01_proxy.crd.yaml
 // assets/crd/0000_03_quota-openshift_01_clusterresourcequota.crd.yaml
 // assets/crd/0000_03_security-openshift_01_scc.crd.yaml
+// assets/crd/0000_03_securityinternal-openshift_01_rangeallocation.crd.yaml
 // assets/crd/0000_10_config-operator_01_build.crd.yaml
 // assets/crd/0000_10_config-operator_01_featuregate.crd.yaml
 // assets/crd/0000_10_config-operator_01_image.crd.yaml
@@ -1056,6 +1057,72 @@ func assetsCrd0000_03_securityOpenshift_01_sccCrdYaml() (*asset, error) {
 	return a, nil
 }
 
+var _assetsCrd0000_03_securityinternalOpenshift_01_rangeallocationCrdYaml = []byte(`apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    api-approved.openshift.io: https://github.com/openshift/api/pull/751
+    include.release.openshift.io/ibm-cloud-managed: "true"
+    include.release.openshift.io/self-managed-high-availability: "true"
+    include.release.openshift.io/single-node-developer: "true"
+  name: rangeallocations.security.internal.openshift.io
+spec:
+  group: security.internal.openshift.io
+  names:
+    kind: RangeAllocation
+    listKind: RangeAllocationList
+    plural: rangeallocations
+    singular: rangeallocation
+  scope: Cluster
+  versions:
+  - name: v1
+    schema:
+      openAPIV3Schema:
+        description: RangeAllocation is used so we can easily expose a RangeAllocation
+          typed for security group This is an internal API, not intended for external
+          consumption.
+        properties:
+          apiVersion:
+            description: 'APIVersion defines the versioned schema of this representation
+              of an object. Servers should convert recognized schemas to the latest
+              internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+            type: string
+          data:
+            description: data is a byte array representing the serialized state of
+              a range allocation. It is a bitmap with each bit set to one to represent
+              a range is taken.
+            type: string
+          kind:
+            description: 'Kind is a string value representing the REST resource this
+              object represents. Servers may infer this from the endpoint the client
+              submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+            type: string
+          metadata:
+            type: object
+          range:
+            description: range is a string representing a unique label for a range
+              of uids, "1000000000-2000000000/10000".
+            type: string
+        type: object
+    served: true
+    storage: true
+`)
+
+func assetsCrd0000_03_securityinternalOpenshift_01_rangeallocationCrdYamlBytes() ([]byte, error) {
+	return _assetsCrd0000_03_securityinternalOpenshift_01_rangeallocationCrdYaml, nil
+}
+
+func assetsCrd0000_03_securityinternalOpenshift_01_rangeallocationCrdYaml() (*asset, error) {
+	bytes, err := assetsCrd0000_03_securityinternalOpenshift_01_rangeallocationCrdYamlBytes()
+	if err != nil {
+		return nil, err
+	}
+
+	info := bindataFileInfo{name: "assets/crd/0000_03_securityinternal-openshift_01_rangeallocation.crd.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
+	a := &asset{bytes: bytes, info: info}
+	return a, nil
+}
+
 var _assetsCrd0000_10_configOperator_01_buildCrdYaml = []byte(`apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
@@ -3476,6 +3543,7 @@ var _bindata = map[string]func() (*asset, error){
 	"assets/crd/0000_03_config-operator_01_proxy.crd.yaml":                assetsCrd0000_03_configOperator_01_proxyCrdYaml,
 	"assets/crd/0000_03_quota-openshift_01_clusterresourcequota.crd.yaml": assetsCrd0000_03_quotaOpenshift_01_clusterresourcequotaCrdYaml,
 	"assets/crd/0000_03_security-openshift_01_scc.crd.yaml":               assetsCrd0000_03_securityOpenshift_01_sccCrdYaml,
+	"assets/crd/0000_03_securityinternal-openshift_01_rangeallocation.crd.yaml": assetsCrd0000_03_securityinternalOpenshift_01_rangeallocationCrdYaml,
 	"assets/crd/0000_10_config-operator_01_build.crd.yaml":                assetsCrd0000_10_configOperator_01_buildCrdYaml,
 	"assets/crd/0000_10_config-operator_01_featuregate.crd.yaml":          assetsCrd0000_10_configOperator_01_featuregateCrdYaml,
 	"assets/crd/0000_10_config-operator_01_image.crd.yaml":                assetsCrd0000_10_configOperator_01_imageCrdYaml,
@@ -3530,6 +3598,7 @@ var _bintree = &bintree{nil, map[string]*bintree{
 		"0000_03_config-operator_01_proxy.crd.yaml":                {assetsCrd0000_03_configOperator_01_proxyCrdYaml, map[string]*bintree{}},
 		"0000_03_quota-openshift_01_clusterresourcequota.crd.yaml": {assetsCrd0000_03_quotaOpenshift_01_clusterresourcequotaCrdYaml, map[string]*bintree{}},
 		"0000_03_security-openshift_01_scc.crd.yaml":               {assetsCrd0000_03_securityOpenshift_01_sccCrdYaml, map[string]*bintree{}},
+		"0000_03_securityinternal-openshift_01_rangeallocation.crd.yaml": {assetsCrd0000_03_securityinternalOpenshift_01_rangeallocationCrdYaml, map[string]*bintree{}},
 		"0000_10_config-operator_01_build.crd.yaml":                {assetsCrd0000_10_configOperator_01_buildCrdYaml, map[string]*bintree{}},
 		"0000_10_config-operator_01_featuregate.crd.yaml":          {assetsCrd0000_10_configOperator_01_featuregateCrdYaml, map[string]*bintree{}},
 		"0000_10_config-operator_01_image.crd.yaml":                {assetsCrd0000_10_configOperator_01_imageCrdYaml, map[string]*bintree{}},
diff --git a/pkg/assets/rbac/bindata.go b/pkg/assets/rbac/bindata.go
index 6f5979161a..0c385fed7b 100644
--- a/pkg/assets/rbac/bindata.go
+++ b/pkg/assets/rbac/bindata.go
@@ -3,6 +3,8 @@
 // assets/rbac/0000_00_flannel-clusterrole.yaml
 // assets/rbac/0000_00_flannel-clusterrolebinding.yaml
 // assets/rbac/0000_00_podsecuritypolicy-flannel.yaml
+// assets/rbac/0000_10_cluster-policy-controller_clusterrole.yaml
+// assets/rbac/0000_10_cluster-policy-controller_clusterrolebinding.yaml
 // assets/rbac/0000_60_service-ca_00_clusterrole.yaml
 // assets/rbac/0000_60_service-ca_00_clusterrolebinding.yaml
 // assets/rbac/0000_60_service-ca_00_role.yaml
@@ -200,6 +202,85 @@ func assetsRbac0000_00_podsecuritypolicyFlannelYaml() (*asset, error) {
 	return a, nil
 }
 
+var _assetsRbac0000_10_clusterPolicyController_clusterroleYaml = []byte(`apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  annotations:
+  name: namespace-security-allocation-controller
+rules:
+- apiGroups:
+  - security.openshift.io
+  - security.internal.openshift.io
+  resources:
+  - rangeallocations
+  verbs:
+  - create
+  - get
+  - update
+- apiGroups:
+  - ""
+  resources:
+  - namespaces
+  verbs:
+  - get
+  - list
+  - update
+  - watch
+  - patch
+- apiGroups:
+  - ""
+  resources:
+  - events
+  verbs:
+  - create
+  - patch
+  - update
+`)
+
+func assetsRbac0000_10_clusterPolicyController_clusterroleYamlBytes() ([]byte, error) {
+	return _assetsRbac0000_10_clusterPolicyController_clusterroleYaml, nil
+}
+
+func assetsRbac0000_10_clusterPolicyController_clusterroleYaml() (*asset, error) {
+	bytes, err := assetsRbac0000_10_clusterPolicyController_clusterroleYamlBytes()
+	if err != nil {
+		return nil, err
+	}
+
+	info := bindataFileInfo{name: "assets/rbac/0000_10_cluster-policy-controller_clusterrole.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
+	a := &asset{bytes: bytes, info: info}
+	return a, nil
+}
+
+var _assetsRbac0000_10_clusterPolicyController_clusterrolebindingYaml = []byte(`apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: namespace-security-allocation-controller
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: namespace-security-allocation-controller
+subjects:
+- kind: ServiceAccount
+  name: namespace-security-allocation-controller
+  namespace: openshift-infra
+`)
+
+func assetsRbac0000_10_clusterPolicyController_clusterrolebindingYamlBytes() ([]byte, error) {
+	return _assetsRbac0000_10_clusterPolicyController_clusterrolebindingYaml, nil
+}
+
+func assetsRbac0000_10_clusterPolicyController_clusterrolebindingYaml() (*asset, error) {
+	bytes, err := assetsRbac0000_10_clusterPolicyController_clusterrolebindingYamlBytes()
+	if err != nil {
+		return nil, err
+	}
+
+	info := bindataFileInfo{name: "assets/rbac/0000_10_cluster-policy-controller_clusterrolebinding.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
+	a := &asset{bytes: bytes, info: info}
+	return a, nil
+}
+
 var _assetsRbac0000_60_serviceCa_00_clusterroleYaml = []byte(`apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRole
 metadata:
@@ -712,19 +793,21 @@ func AssetNames() []string {
 
 // _bindata is a table, holding each asset generator, mapped to its name.
 var _bindata = map[string]func() (*asset, error){
-	"assets/rbac/0000_00_flannel-clusterrole.yaml":                      assetsRbac0000_00_flannelClusterroleYaml,
-	"assets/rbac/0000_00_flannel-clusterrolebinding.yaml":               assetsRbac0000_00_flannelClusterrolebindingYaml,
-	"assets/rbac/0000_00_podsecuritypolicy-flannel.yaml":                assetsRbac0000_00_podsecuritypolicyFlannelYaml,
-	"assets/rbac/0000_60_service-ca_00_clusterrole.yaml":                assetsRbac0000_60_serviceCa_00_clusterroleYaml,
-	"assets/rbac/0000_60_service-ca_00_clusterrolebinding.yaml":         assetsRbac0000_60_serviceCa_00_clusterrolebindingYaml,
-	"assets/rbac/0000_60_service-ca_00_role.yaml":                       assetsRbac0000_60_serviceCa_00_roleYaml,
-	"assets/rbac/0000_60_service-ca_00_rolebinding.yaml":                assetsRbac0000_60_serviceCa_00_rolebindingYaml,
-	"assets/rbac/0000_70_dns_01-cluster-role-binding.yaml":              assetsRbac0000_70_dns_01ClusterRoleBindingYaml,
-	"assets/rbac/0000_70_dns_01-cluster-role.yaml":                      assetsRbac0000_70_dns_01ClusterRoleYaml,
-	"assets/rbac/0000_80_hostpath-provisioner-clusterrole.yaml":         assetsRbac0000_80_hostpathProvisionerClusterroleYaml,
-	"assets/rbac/0000_80_hostpath-provisioner-clusterrolebinding.yaml":  assetsRbac0000_80_hostpathProvisionerClusterrolebindingYaml,
-	"assets/rbac/0000_80_openshift-router-cluster-role-binding.yaml":    assetsRbac0000_80_openshiftRouterClusterRoleBindingYaml,
-	"assets/rbac/0000_80_openshift-router-cluster-role.yaml":            assetsRbac0000_80_openshiftRouterClusterRoleYaml,
+	"assets/rbac/0000_00_flannel-clusterrole.yaml":                          assetsRbac0000_00_flannelClusterroleYaml,
+	"assets/rbac/0000_00_flannel-clusterrolebinding.yaml":                   assetsRbac0000_00_flannelClusterrolebindingYaml,
+	"assets/rbac/0000_00_podsecuritypolicy-flannel.yaml":                    assetsRbac0000_00_podsecuritypolicyFlannelYaml,
+	"assets/rbac/0000_10_cluster-policy-controller_clusterrole.yaml":        assetsRbac0000_10_clusterPolicyController_clusterroleYaml,
+	"assets/rbac/0000_10_cluster-policy-controller_clusterrolebinding.yaml": assetsRbac0000_10_clusterPolicyController_clusterrolebindingYaml,
+	"assets/rbac/0000_60_service-ca_00_clusterrole.yaml":                    assetsRbac0000_60_serviceCa_00_clusterroleYaml,
+	"assets/rbac/0000_60_service-ca_00_clusterrolebinding.yaml":             assetsRbac0000_60_serviceCa_00_clusterrolebindingYaml,
+	"assets/rbac/0000_60_service-ca_00_role.yaml":                           assetsRbac0000_60_serviceCa_00_roleYaml,
+	"assets/rbac/0000_60_service-ca_00_rolebinding.yaml":                    assetsRbac0000_60_serviceCa_00_rolebindingYaml,
+	"assets/rbac/0000_70_dns_01-cluster-role-binding.yaml":                  assetsRbac0000_70_dns_01ClusterRoleBindingYaml,
+	"assets/rbac/0000_70_dns_01-cluster-role.yaml":                          assetsRbac0000_70_dns_01ClusterRoleYaml,
+	"assets/rbac/0000_80_hostpath-provisioner-clusterrole.yaml":             assetsRbac0000_80_hostpathProvisionerClusterroleYaml,
+	"assets/rbac/0000_80_hostpath-provisioner-clusterrolebinding.yaml":      assetsRbac0000_80_hostpathProvisionerClusterrolebindingYaml,
+	"assets/rbac/0000_80_openshift-router-cluster-role-binding.yaml":        assetsRbac0000_80_openshiftRouterClusterRoleBindingYaml,
+	"assets/rbac/0000_80_openshift-router-cluster-role.yaml":                assetsRbac0000_80_openshiftRouterClusterRoleYaml,
 }
 
 // AssetDir returns the file names below a certain
@@ -770,19 +853,21 @@ type bintree struct {
 var _bintree = &bintree{nil, map[string]*bintree{
 	"assets": {nil, map[string]*bintree{
 		"rbac": {nil, map[string]*bintree{
-			"0000_00_flannel-clusterrole.yaml":                      {assetsRbac0000_00_flannelClusterroleYaml, map[string]*bintree{}},
-			"0000_00_flannel-clusterrolebinding.yaml":               {assetsRbac0000_00_flannelClusterrolebindingYaml, map[string]*bintree{}},
-			"0000_00_podsecuritypolicy-flannel.yaml":                {assetsRbac0000_00_podsecuritypolicyFlannelYaml, map[string]*bintree{}},
-			"0000_60_service-ca_00_clusterrole.yaml":                {assetsRbac0000_60_serviceCa_00_clusterroleYaml, map[string]*bintree{}},
-			"0000_60_service-ca_00_clusterrolebinding.yaml":         {assetsRbac0000_60_serviceCa_00_clusterrolebindingYaml, map[string]*bintree{}},
-			"0000_60_service-ca_00_role.yaml":                       {assetsRbac0000_60_serviceCa_00_roleYaml, map[string]*bintree{}},
-			"0000_60_service-ca_00_rolebinding.yaml":                {assetsRbac0000_60_serviceCa_00_rolebindingYaml, map[string]*bintree{}},
-			"0000_70_dns_01-cluster-role-binding.yaml":              {assetsRbac0000_70_dns_01ClusterRoleBindingYaml, map[string]*bintree{}},
-			"0000_70_dns_01-cluster-role.yaml":                      {assetsRbac0000_70_dns_01ClusterRoleYaml, map[string]*bintree{}},
-			"0000_80_hostpath-provisioner-clusterrole.yaml":         {assetsRbac0000_80_hostpathProvisionerClusterroleYaml, map[string]*bintree{}},
-			"0000_80_hostpath-provisioner-clusterrolebinding.yaml":  {assetsRbac0000_80_hostpathProvisionerClusterrolebindingYaml, map[string]*bintree{}},
-			"0000_80_openshift-router-cluster-role-binding.yaml":    {assetsRbac0000_80_openshiftRouterClusterRoleBindingYaml, map[string]*bintree{}},
-			"0000_80_openshift-router-cluster-role.yaml":            {assetsRbac0000_80_openshiftRouterClusterRoleYaml, map[string]*bintree{}},
+			"0000_00_flannel-clusterrole.yaml":                          {assetsRbac0000_00_flannelClusterroleYaml, map[string]*bintree{}},
+			"0000_00_flannel-clusterrolebinding.yaml":                   {assetsRbac0000_00_flannelClusterrolebindingYaml, map[string]*bintree{}},
+			"0000_00_podsecuritypolicy-flannel.yaml":                    {assetsRbac0000_00_podsecuritypolicyFlannelYaml, map[string]*bintree{}},
+			"0000_10_cluster-policy-controller_clusterrole.yaml":        {assetsRbac0000_10_clusterPolicyController_clusterroleYaml, map[string]*bintree{}},
+			"0000_10_cluster-policy-controller_clusterrolebinding.yaml": {assetsRbac0000_10_clusterPolicyController_clusterrolebindingYaml, map[string]*bintree{}},
+			"0000_60_service-ca_00_clusterrole.yaml":                    {assetsRbac0000_60_serviceCa_00_clusterroleYaml, map[string]*bintree{}},
+			"0000_60_service-ca_00_clusterrolebinding.yaml":             {assetsRbac0000_60_serviceCa_00_clusterrolebindingYaml, map[string]*bintree{}},
+			"0000_60_service-ca_00_role.yaml":                           {assetsRbac0000_60_serviceCa_00_roleYaml, map[string]*bintree{}},
+			"0000_60_service-ca_00_rolebinding.yaml":                    {assetsRbac0000_60_serviceCa_00_rolebindingYaml, map[string]*bintree{}},
+			"0000_70_dns_01-cluster-role-binding.yaml":                  {assetsRbac0000_70_dns_01ClusterRoleBindingYaml, map[string]*bintree{}},
+			"0000_70_dns_01-cluster-role.yaml":                          {assetsRbac0000_70_dns_01ClusterRoleYaml, map[string]*bintree{}},
+			"0000_80_hostpath-provisioner-clusterrole.yaml":             {assetsRbac0000_80_hostpathProvisionerClusterroleYaml, map[string]*bintree{}},
+			"0000_80_hostpath-provisioner-clusterrolebinding.yaml":      {assetsRbac0000_80_hostpathProvisionerClusterrolebindingYaml, map[string]*bintree{}},
+			"0000_80_openshift-router-cluster-role-binding.yaml":        {assetsRbac0000_80_openshiftRouterClusterRoleBindingYaml, map[string]*bintree{}},
+			"0000_80_openshift-router-cluster-role.yaml":                {assetsRbac0000_80_openshiftRouterClusterRoleYaml, map[string]*bintree{}},
 		}},
 	}},
 }}
diff --git a/pkg/cmd/init.go b/pkg/cmd/init.go
index 698ff2c09b..9326588ee9 100644
--- a/pkg/cmd/init.go
+++ b/pkg/cmd/init.go
@@ -136,6 +136,12 @@ func initCerts(cfg *config.MicroshiftConfig) error {
 		"openshift-oauth-apiserver.svc", "kubernetes.default.svc", "kubernetes.default", "kubernetes", "localhost"}); err != nil {
 		return err
 	}
+	if err := util.GenCerts("openshift-cluster-policy-controller", cfg.DataDir+"/resources/openshift-cluster-policy-controller/secrets",
+		"tls.crt", "tls.key",
+		[]string{"openshift-cluster-policy-controller", cfg.NodeIP, cfg.NodeName, "openshift-cluster-policy-controller.default.svc", "openshift-cluster-policy-controller.default",
+			"127.0.0.1", "kubernetes.default.svc", "kubernetes.default", "kubernetes", "localhost"}); err != nil {
+		return err
+	}
 	return nil
 }
 
diff --git a/pkg/cmd/run.go b/pkg/cmd/run.go
index 8646bb1ce3..6fd6a71f9d 100644
--- a/pkg/cmd/run.go
+++ b/pkg/cmd/run.go
@@ -76,6 +76,7 @@ func RunMicroshift(cfg *config.MicroshiftConfig, flags *pflag.FlagSet) error {
 	util.Must(m.AddService(controllers.NewOpenShiftPrepJob(cfg)))
 	util.Must(m.AddService(controllers.NewOpenShiftAPIServer(cfg)))
 	util.Must(m.AddService(controllers.NewOpenShiftOAuth(cfg)))
+	util.Must(m.AddService(controllers.NewOpenShiftClusterPolicyController(cfg)))
 	util.Must(m.AddService(controllers.NewOpenShiftAPIComponents(cfg)))
 	util.Must(m.AddService(mdns.NewMicroShiftmDNSController(cfg)))
 
diff --git a/pkg/components/cluster-policy-controller.go b/pkg/components/cluster-policy-controller.go
new file mode 100644
index 0000000000..9cdd718455
--- /dev/null
+++ b/pkg/components/cluster-policy-controller.go
@@ -0,0 +1,35 @@
+package components
+
+import (
+	"github.com/openshift/microshift/pkg/assets"
+	"github.com/sirupsen/logrus"
+)
+
+func startClusterPolicyController(kubeconfigPath string) error {
+	var (
+		cr = []string{
+			"assets/rbac/0000_10_cluster-policy-controller_clusterrole.yaml",
+		}
+		crb = []string{
+			"assets/rbac/0000_10_cluster-policy-controller_clusterrolebinding.yaml",
+		}
+		sa = []string{
+			"assets/core/0000_10_namespace-security-allocation-controller_sa.yaml",
+		}
+	)
+
+	if err := assets.ApplyClusterRoles(cr, kubeconfigPath); err != nil {
+		logrus.Warningf("failed to apply clusterrole %v: %v", cr, err)
+		return err
+	}
+	if err := assets.ApplyClusterRoleBindings(crb, kubeconfigPath); err != nil {
+		logrus.Warningf("failed to apply clusterrolebinding %v: %v", crb, err)
+		return err
+	}
+	if err := assets.ApplyServiceAccounts(sa, kubeconfigPath); err != nil {
+		logrus.Warningf("failed to apply sa %v: %v", sa, err)
+		return err
+	}
+	return nil
+
+}
diff --git a/pkg/components/components.go b/pkg/components/components.go
index 5bd937fd20..76df7491ed 100755
--- a/pkg/components/components.go
+++ b/pkg/components/components.go
@@ -28,5 +28,9 @@ func StartComponents(cfg *config.MicroshiftConfig) error {
 		logrus.Warningf("failed to start Flannel: %v", err)
 		return err
 	}
+	if err := startClusterPolicyController(cfg.DataDir + "/resources/kubeadmin/kubeconfig"); err != nil {
+		logrus.Warningf("failed to start cluster-policy-controller: %v", err)
+		return err
+	}
 	return nil
 }
diff --git a/pkg/controllers/apiservice.go b/pkg/controllers/apiservice.go
index 81d6447197..a81ebbe5f4 100644
--- a/pkg/controllers/apiservice.go
+++ b/pkg/controllers/apiservice.go
@@ -217,6 +217,7 @@ func applySCCs(kubeconfigPath string) error {
 func PrepareOCP(cfg *config.MicroshiftConfig) error {
 	if err := assets.ApplyNamespaces([]string{
 		"assets/core/0000_50_cluster-openshift-controller-manager_00_namespace.yaml",
+		"assets/core/0000_50_cluster-openshift-cluster-policy-controller_00_namespace.yaml",
 	}, cfg.DataDir+"/resources/kubeadmin/kubeconfig"); err != nil {
 		logrus.Warningf("failed to apply openshift namespaces %v", err)
 		return err
diff --git a/pkg/controllers/openshift-cluster-policy-controller.go b/pkg/controllers/openshift-cluster-policy-controller.go
new file mode 100644
index 0000000000..e2eb20c073
--- /dev/null
+++ b/pkg/controllers/openshift-cluster-policy-controller.go
@@ -0,0 +1,116 @@
+/*
+Copyright © 2021 Microshift Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package controllers
+
+import (
+	"context"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+
+	clusterpolicycontroller "github.com/openshift/cluster-policy-controller/pkg/cmd/cluster-policy-controller"
+	"github.com/openshift/library-go/pkg/config/client"
+	"github.com/openshift/library-go/pkg/controller/controllercmd"
+	"github.com/openshift/library-go/pkg/operator/events"
+	"github.com/openshift/microshift/pkg/config"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/rest"
+	"k8s.io/klog/v2"
+)
+
+const (
+	podNameEnv         = "POD_NAME"
+	podNamespaceEnv    = "POD_NAMESPACE"
+	componentName      = "cluster-policy-controller"
+	componentNamespace = "openshift-kube-controller-manager"
+)
+
+// OpenShift ClusterPolicyController Service
+type OpenShiftClusterPolicyController struct {
+	controllerFlags *controllercmd.ControllerFlags
+	kubeconfig      string
+}
+
+func NewOpenShiftClusterPolicyController(cfg *config.MicroshiftConfig) *OpenShiftClusterPolicyController {
+	s := &OpenShiftClusterPolicyController{}
+	s.configure(cfg)
+	return s
+}
+
+func (s *OpenShiftClusterPolicyController) Name() string {
+	return "openshift-cluster-policy-controller"
+}
+func (s *OpenShiftClusterPolicyController) Dependencies() []string {
+	return []string{"kube-apiserver", "openshift-controller-manager", "ocp-apiserver"}
+}
+
+func (s *OpenShiftClusterPolicyController) Run(ctx context.Context, ready chan<- struct{}, stopped chan<- struct{}) error {
+	defer close(stopped)
+	klog.Infof("starting openshift-cluster-policy-controller")
+	_, unstructuredConfig, err := s.controllerFlags.ToConfigObj()
+	if err != nil {
+		return err
+	}
+	clientConfig, err := client.GetKubeConfigOrInClusterConfig(s.kubeconfig, nil)
+	if err != nil {
+		return err
+	}
+	controllerRef := &corev1.ObjectReference{
+		Kind:      "Pod",
+		Name:      os.Getenv(podNameEnv),
+		Namespace: os.Getenv(podNamespaceEnv),
+	}
+	kubeClient := kubernetes.NewForConfigOrDie(clientConfig)
+	eventRecorder := events.NewKubeRecorderWithOptions(kubeClient.CoreV1().Events(componentNamespace), events.RecommendedClusterSingletonCorrelatorOptions(), componentName, controllerRef)
+	protoConfig := rest.CopyConfig(clientConfig)
+	protoConfig.AcceptContentTypes = "application/vnd.kubernetes.protobuf,application/json"
+	protoConfig.ContentType = "application/vnd.kubernetes.protobuf"
+	controllerContext := &controllercmd.ControllerContext{
+		ComponentConfig: unstructuredConfig,
+		KubeConfig:      clientConfig,
+		ProtoKubeConfig: protoConfig,
+		EventRecorder:   eventRecorder,
+	}
+	if err := clusterpolicycontroller.RunClusterPolicyController(ctx, controllerContext); err != nil {
+		return err
+	}
+	return ctx.Err()
+}
+
+func (s *OpenShiftClusterPolicyController) configure(cfg *config.MicroshiftConfig) error {
+	controllerFlags := controllercmd.NewControllerFlags()
+	kubeconfig := filepath.Join(cfg.DataDir, "resources", "kubeadmin", "kubeconfig")
+	s.kubeconfig = kubeconfig
+	path := filepath.Join(cfg.DataDir, "resources", "openshift-cluster-policy-controller", "config", "config.yaml")
+	controllerFlags.ConfigFile = path
+	controllerFlags.KubeConfigFile = kubeconfig
+	controllerFlags.BindAddress = "0.0.0.0:10357"
+	s.controllerFlags = controllerFlags
+
+	data := []byte(`apiVersion: openshiftcontrolplane.config.openshift.io/v1
+kind: OpenShiftControllerManagerConfig
+kubeClientConfig:
+  kubeConfig: ` + cfg.DataDir + `/resources/kubeadmin/kubeconfig
+servingInfo:
+  bindAddress: "0.0.0.0:10357"
+  certFile: ` + cfg.DataDir + `/resources/openshift-cluster-policy-controller/secrets/tls.crt
+  keyFile: ` + cfg.DataDir + `/resources/openshift-cluster-policy-controller/secrets/tls.key
+  clientCA: ` + cfg.DataDir + `/certs/ca-bundle/ca-bundle.crt`)
+
+	os.MkdirAll(filepath.Dir(path), os.FileMode(0755))
+	return ioutil.WriteFile(path, data, 0644)
+}
diff --git a/pkg/release/release_amd64.go b/pkg/release/release_amd64.go
index 2727358a2b..b55f2c680f 100644
--- a/pkg/release/release_amd64.go
+++ b/pkg/release/release_amd64.go
@@ -22,6 +22,7 @@ package release
 func init() {
 	Image = map[string]string{
 		"cli":                       "quay.io/openshift/okd-content@sha256:27f7918b5f0444e278118b2ee054f5b6fadfc4005cf91cb78106c3f5e1833edd",
+		"cluster_policy_controller": "quay.io/openshift/okd-content@sha256:caf8254cbd4f3fc3e923682106a39f3bcfc62e9746ca909ed50b930e2d17a166",
 		"coredns":                   "quay.io/openshift/okd-content@sha256:bcdefdbcee8af1e634e68a850c52fe1e9cb31364525e30f5b20ee4eacb93c3e8",
 		"haproxy_router":            "quay.io/openshift/okd-content@sha256:01cfbbfdc11e2cbb8856f31a65c83acc7cfbd1986c1309f58c255840efcc0b64",
 		"kube_flannel":              "quay.io/coreos/flannel:v0.14.0",
diff --git a/scripts/rebase.sh b/scripts/rebase.sh
index 5c9b62b3be..79fe715b51 100755
--- a/scripts/rebase.sh
+++ b/scripts/rebase.sh
@@ -24,7 +24,7 @@ shopt -s expand_aliases
 REPOROOT="$(readlink -f "$(dirname "${BASH_SOURCE[0]}")/../")"
 STAGING_DIR="$REPOROOT/_output/staging"
 
-EMBEDDED_COMPONENTS="etcd hyperkube openshift-apiserver openshift-controller-manager"
+EMBEDDED_COMPONENTS="etcd hyperkube openshift-apiserver openshift-controller-manager openshift-cluster-policy-controller"
 LOADED_COMPONENTS="cluster-dns-operator cluster-ingress-operator service-ca-operator"
 
diff --git a/vendor/github.com/openshift/api/securityinternal/v1/0000_03_securityinternal-openshift_02_rangeallocation.crd.yaml b/vendor/github.com/openshift/api/securityinternal/v1/0000_03_securityinternal-openshift_02_rangeallocation.crd.yaml
new file mode 100644
index 0000000000..5ae9859025
--- /dev/null
+++ b/vendor/github.com/openshift/api/securityinternal/v1/0000_03_securityinternal-openshift_02_rangeallocation.crd.yaml
@@ -0,0 +1,49 @@
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    api-approved.openshift.io: https://github.com/openshift/api/pull/751
+    include.release.openshift.io/ibm-cloud-managed: "true"
+    include.release.openshift.io/self-managed-high-availability: "true"
+    include.release.openshift.io/single-node-developer: "true"
+  name: rangeallocations.security.internal.openshift.io
+spec:
+  group: security.internal.openshift.io
+  names:
+    kind: RangeAllocation
+    listKind: RangeAllocationList
+    plural: rangeallocations
+    singular: rangeallocation
+  scope: Cluster
+  versions:
+  - name: v1
+    schema:
+      openAPIV3Schema:
+        description: RangeAllocation is used so we can easily expose a RangeAllocation
+          typed for security group This is an internal API, not intended for external
+          consumption.
+        properties:
+          apiVersion:
+            description: 'APIVersion defines the versioned schema of this representation
+              of an object. Servers should convert recognized schemas to the latest
+              internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+            type: string
+          data:
+            description: data is a byte array representing the serialized state of
+              a range allocation. It is a bitmap with each bit set to one to represent
+              a range is taken.
+            type: string
+          kind:
+            description: 'Kind is a string value representing the REST resource this
+              object represents. Servers may infer this from the endpoint the client
+              submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+            type: string
+          metadata:
+            type: object
+          range:
+            description: range is a string representing a unique label for a range
+              of uids, "1000000000-2000000000/10000".
+            type: string
+        type: object
+    served: true
+    storage: true
diff --git a/vendor/github.com/openshift/api/securityinternal/v1/0000_03_securityinternal-openshift_02_rangeallocation.crd.yaml-patch b/vendor/github.com/openshift/api/securityinternal/v1/0000_03_securityinternal-openshift_02_rangeallocation.crd.yaml-patch
new file mode 100644
index 0000000000..3b69986a44
--- /dev/null
+++ b/vendor/github.com/openshift/api/securityinternal/v1/0000_03_securityinternal-openshift_02_rangeallocation.crd.yaml-patch
@@ -0,0 +1,2 @@
+- op: remove
+  path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/data/format
diff --git a/vendor/github.com/openshift/api/securityinternal/v1/doc.go b/vendor/github.com/openshift/api/securityinternal/v1/doc.go
new file mode 100644
index 0000000000..ce0d85c28c
--- /dev/null
+++ b/vendor/github.com/openshift/api/securityinternal/v1/doc.go
@@ -0,0 +1,8 @@
+// +k8s:deepcopy-gen=package,register
+// +k8s:defaulter-gen=TypeMeta
+// +k8s:openapi-gen=true
+
+// +kubebuilder:validation:Optional
+// +groupName=security.internal.openshift.io
+// Package v1 is the v1 version of the API.
+package v1
diff --git a/vendor/github.com/openshift/api/securityinternal/v1/register.go b/vendor/github.com/openshift/api/securityinternal/v1/register.go
new file mode 100644
index 0000000000..7099e51c2b
--- /dev/null
+++ b/vendor/github.com/openshift/api/securityinternal/v1/register.go
@@ -0,0 +1,39 @@
+package v1
+
+import (
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+var (
+	GroupName     = "security.internal.openshift.io"
+	GroupVersion  = schema.GroupVersion{Group: GroupName, Version: "v1"}
+	schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, corev1.AddToScheme)
+	// Install is a function which adds this version to a scheme
+	Install = schemeBuilder.AddToScheme
+
+	// SchemeGroupVersion generated code relies on this name
+	// Deprecated
+	SchemeGroupVersion = GroupVersion
+	// AddToScheme exists solely to keep the old generators creating valid code
+	// DEPRECATED
+	AddToScheme = schemeBuilder.AddToScheme
+)
+
+// Resource generated code relies on this being here, but it logically belongs to the group
+// DEPRECATED
+func Resource(resource string) schema.GroupResource {
+	return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(GroupVersion,
+		&RangeAllocation{},
+		&RangeAllocationList{},
+	)
+	metav1.AddToGroupVersion(scheme, GroupVersion)
+	return nil
+}
diff --git a/vendor/github.com/openshift/api/securityinternal/v1/types.go b/vendor/github.com/openshift/api/securityinternal/v1/types.go
new file mode 100644
index 0000000000..537b7c4919
--- /dev/null
+++ b/vendor/github.com/openshift/api/securityinternal/v1/types.go
@@ -0,0 +1,35 @@
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// RangeAllocation is used so we can easily expose a RangeAllocation typed for security group
+// This is an internal API, not intended for external consumption.
+type RangeAllocation struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	// range is a string representing a unique label for a range of uids, "1000000000-2000000000/10000".
+	Range string `json:"range"`
+
+	// data is a byte array representing the serialized state of a range allocation. It is a bitmap
+	// with each bit set to one to represent a range is taken.
+	Data []byte `json:"data"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// RangeAllocationList is a list of RangeAllocations objects
+// This is an internal API, not intended for external consumption.
+type RangeAllocationList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+
+	// List of RangeAllocations.
+	Items []RangeAllocation `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/securityinternal/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/securityinternal/v1/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..ee3997e9ae
--- /dev/null
+++ b/vendor/github.com/openshift/api/securityinternal/v1/zz_generated.deepcopy.go
@@ -0,0 +1,73 @@
+// +build !ignore_autogenerated
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+ +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RangeAllocation) DeepCopyInto(out *RangeAllocation) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RangeAllocation. +func (in *RangeAllocation) DeepCopy() *RangeAllocation { + if in == nil { + return nil + } + out := new(RangeAllocation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RangeAllocation) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RangeAllocationList) DeepCopyInto(out *RangeAllocationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RangeAllocation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RangeAllocationList. +func (in *RangeAllocationList) DeepCopy() *RangeAllocationList { + if in == nil { + return nil + } + out := new(RangeAllocationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RangeAllocationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/github.com/openshift/api/securityinternal/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/securityinternal/v1/zz_generated.swagger_doc_generated.go new file mode 100644 index 0000000000..b9a0a13b2e --- /dev/null +++ b/vendor/github.com/openshift/api/securityinternal/v1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,33 @@ +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_RangeAllocation = map[string]string{ + "": "RangeAllocation is used so we can easily expose a RangeAllocation typed for security group This is an internal API, not intended for external consumption.", + "range": "range is a string representing a unique label for a range of uids, \"1000000000-2000000000/10000\".", + "data": "data is a byte array representing the serialized state of a range allocation. 
It is a bitmap with each bit set to one to represent a range is taken.", +} + +func (RangeAllocation) SwaggerDoc() map[string]string { + return map_RangeAllocation +} + +var map_RangeAllocationList = map[string]string{ + "": "RangeAllocationList is a list of RangeAllocations objects This is an internal API, not intended for external consumption.", + "items": "List of RangeAllocations.", +} + +func (RangeAllocationList) SwaggerDoc() map[string]string { + return map_RangeAllocationList +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/client-go/securityinternal/clientset/versioned/clientset.go b/vendor/github.com/openshift/client-go/securityinternal/clientset/versioned/clientset.go new file mode 100644 index 0000000000..4a701ba932 --- /dev/null +++ b/vendor/github.com/openshift/client-go/securityinternal/clientset/versioned/clientset.go @@ -0,0 +1,81 @@ +// Code generated by client-gen. DO NOT EDIT. + +package versioned + +import ( + "fmt" + + securityv1 "github.com/openshift/client-go/securityinternal/clientset/versioned/typed/securityinternal/v1" + discovery "k8s.io/client-go/discovery" + rest "k8s.io/client-go/rest" + flowcontrol "k8s.io/client-go/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + SecurityV1() securityv1.SecurityV1Interface +} + +// Clientset contains the clients for groups. Each group has exactly one +// version included in a Clientset. +type Clientset struct { + *discovery.DiscoveryClient + securityV1 *securityv1.SecurityV1Client +} + +// SecurityV1 retrieves the SecurityV1Client +func (c *Clientset) SecurityV1() securityv1.SecurityV1Interface { + return c.securityV1 +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + if c == nil { + return nil + } + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfig will generate a rate-limiter in configShallowCopy. +func NewForConfig(c *rest.Config) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + if configShallowCopy.Burst <= 0 { + return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") + } + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + var cs Clientset + var err error + cs.securityV1, err = securityv1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + return &cs, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *Clientset { + var cs Clientset + cs.securityV1 = securityv1.NewForConfigOrDie(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) + return &cs +} + +// New creates a new Clientset for the given RESTClient. 
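+// Unlike NewForConfig, New performs no configuration validation and no rate-limiter setup; the caller is responsible for supplying a fully configured rest.Interface.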
+func New(c rest.Interface) *Clientset { + var cs Clientset + cs.securityV1 = securityv1.New(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &cs +} diff --git a/vendor/github.com/openshift/client-go/securityinternal/clientset/versioned/doc.go b/vendor/github.com/openshift/client-go/securityinternal/clientset/versioned/doc.go new file mode 100644 index 0000000000..0e0c2a8900 --- /dev/null +++ b/vendor/github.com/openshift/client-go/securityinternal/clientset/versioned/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated clientset. +package versioned diff --git a/vendor/github.com/openshift/client-go/securityinternal/clientset/versioned/scheme/doc.go b/vendor/github.com/openshift/client-go/securityinternal/clientset/versioned/scheme/doc.go new file mode 100644 index 0000000000..14db57a58f --- /dev/null +++ b/vendor/github.com/openshift/client-go/securityinternal/clientset/versioned/scheme/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package contains the scheme of the automatically generated clientset. +package scheme diff --git a/vendor/github.com/openshift/client-go/securityinternal/clientset/versioned/scheme/register.go b/vendor/github.com/openshift/client-go/securityinternal/clientset/versioned/scheme/register.go new file mode 100644 index 0000000000..36fa42b4e1 --- /dev/null +++ b/vendor/github.com/openshift/client-go/securityinternal/clientset/versioned/scheme/register.go @@ -0,0 +1,40 @@ +// Code generated by client-gen. DO NOT EDIT. + +package scheme + +import ( + securityv1 "github.com/openshift/api/securityinternal/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var Scheme = runtime.NewScheme() +var Codecs = serializer.NewCodecFactory(Scheme) +var ParameterCodec = runtime.NewParameterCodec(Scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + securityv1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(Scheme)) +} diff --git a/vendor/github.com/openshift/client-go/securityinternal/clientset/versioned/typed/securityinternal/v1/doc.go b/vendor/github.com/openshift/client-go/securityinternal/clientset/versioned/typed/securityinternal/v1/doc.go new file mode 100644 index 0000000000..225e6b2be3 --- /dev/null +++ b/vendor/github.com/openshift/client-go/securityinternal/clientset/versioned/typed/securityinternal/v1/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. 
+package v1 diff --git a/vendor/github.com/openshift/client-go/securityinternal/clientset/versioned/typed/securityinternal/v1/generated_expansion.go b/vendor/github.com/openshift/client-go/securityinternal/clientset/versioned/typed/securityinternal/v1/generated_expansion.go new file mode 100644 index 0000000000..b26736bb1d --- /dev/null +++ b/vendor/github.com/openshift/client-go/securityinternal/clientset/versioned/typed/securityinternal/v1/generated_expansion.go @@ -0,0 +1,5 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +type RangeAllocationExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/securityinternal/clientset/versioned/typed/securityinternal/v1/rangeallocation.go b/vendor/github.com/openshift/client-go/securityinternal/clientset/versioned/typed/securityinternal/v1/rangeallocation.go new file mode 100644 index 0000000000..c9b0de1333 --- /dev/null +++ b/vendor/github.com/openshift/client-go/securityinternal/clientset/versioned/typed/securityinternal/v1/rangeallocation.go @@ -0,0 +1,152 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/openshift/api/securityinternal/v1" + scheme "github.com/openshift/client-go/securityinternal/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// RangeAllocationsGetter has a method to return a RangeAllocationInterface. +// A group's client should implement this interface. +type RangeAllocationsGetter interface { + RangeAllocations() RangeAllocationInterface +} + +// RangeAllocationInterface has methods to work with RangeAllocation resources. +type RangeAllocationInterface interface { + Create(ctx context.Context, rangeAllocation *v1.RangeAllocation, opts metav1.CreateOptions) (*v1.RangeAllocation, error) + Update(ctx context.Context, rangeAllocation *v1.RangeAllocation, opts metav1.UpdateOptions) (*v1.RangeAllocation, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.RangeAllocation, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.RangeAllocationList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.RangeAllocation, err error) + RangeAllocationExpansion +} + +// rangeAllocations implements RangeAllocationInterface +type rangeAllocations struct { + client rest.Interface +} + +// newRangeAllocations returns a RangeAllocations +func newRangeAllocations(c *SecurityV1Client) *rangeAllocations { + return &rangeAllocations{ + client: c.RESTClient(), + } +} + +// Get takes name of the rangeAllocation, and returns the corresponding rangeAllocation object, and an error if there is any. +func (c *rangeAllocations) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.RangeAllocation, err error) { + result = &v1.RangeAllocation{} + err = c.client.Get(). + Resource("rangeallocations"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of RangeAllocations that match those selectors. 
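+// A zero-valued ListOptions returns every RangeAllocation; when opts.TimeoutSeconds is set, it is translated into a client-side request timeout below.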
+func (c *rangeAllocations) List(ctx context.Context, opts metav1.ListOptions) (result *v1.RangeAllocationList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.RangeAllocationList{} + err = c.client.Get(). + Resource("rangeallocations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested rangeAllocations. +func (c *rangeAllocations) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("rangeallocations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a rangeAllocation and creates it. Returns the server's representation of the rangeAllocation, and an error, if there is any. +func (c *rangeAllocations) Create(ctx context.Context, rangeAllocation *v1.RangeAllocation, opts metav1.CreateOptions) (result *v1.RangeAllocation, err error) { + result = &v1.RangeAllocation{} + err = c.client.Post(). + Resource("rangeallocations"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(rangeAllocation). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a rangeAllocation and updates it. Returns the server's representation of the rangeAllocation, and an error, if there is any. +func (c *rangeAllocations) Update(ctx context.Context, rangeAllocation *v1.RangeAllocation, opts metav1.UpdateOptions) (result *v1.RangeAllocation, err error) { + result = &v1.RangeAllocation{} + err = c.client.Put(). + Resource("rangeallocations"). + Name(rangeAllocation.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(rangeAllocation). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the rangeAllocation and deletes it. Returns an error if one occurs. +func (c *rangeAllocations) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Resource("rangeallocations"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *rangeAllocations) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("rangeallocations"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched rangeAllocation. +func (c *rangeAllocations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.RangeAllocation, err error) { + result = &v1.RangeAllocation{} + err = c.client.Patch(pt). + Resource("rangeallocations"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). 
+ Into(result) + return +} diff --git a/vendor/github.com/openshift/client-go/securityinternal/clientset/versioned/typed/securityinternal/v1/securityinternal_client.go b/vendor/github.com/openshift/client-go/securityinternal/clientset/versioned/typed/securityinternal/v1/securityinternal_client.go new file mode 100644 index 0000000000..dba2f945dd --- /dev/null +++ b/vendor/github.com/openshift/client-go/securityinternal/clientset/versioned/typed/securityinternal/v1/securityinternal_client.go @@ -0,0 +1,73 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/api/securityinternal/v1" + "github.com/openshift/client-go/securityinternal/clientset/versioned/scheme" + rest "k8s.io/client-go/rest" +) + +type SecurityV1Interface interface { + RESTClient() rest.Interface + RangeAllocationsGetter +} + +// SecurityV1Client is used to interact with features provided by the security.internal.openshift.io group. +type SecurityV1Client struct { + restClient rest.Interface +} + +func (c *SecurityV1Client) RangeAllocations() RangeAllocationInterface { + return newRangeAllocations(c) +} + +// NewForConfig creates a new SecurityV1Client for the given config. +func NewForConfig(c *rest.Config) (*SecurityV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &SecurityV1Client{client}, nil +} + +// NewForConfigOrDie creates a new SecurityV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *SecurityV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new SecurityV1Client for the given RESTClient. +func New(c rest.Interface) *SecurityV1Client { + return &SecurityV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *SecurityV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/github.com/openshift/cluster-policy-controller/LICENSE b/vendor/github.com/openshift/cluster-policy-controller/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/github.com/openshift/cluster-policy-controller/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
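A minimal sketch of how the generated securityinternal clientset above might be consumed. The kubeconfig path and the RangeAllocation object name ("default") are illustrative assumptions, not something this change defines:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"

	securityclient "github.com/openshift/client-go/securityinternal/clientset/versioned"
)

func main() {
	// Hypothetical kubeconfig path; an in-cluster rest.Config would work the same way.
	config, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	cs, err := securityclient.NewForConfig(config)
	if err != nil {
		panic(err)
	}
	// "default" is an assumed object name, used only for illustration.
	ra, err := cs.SecurityV1().RangeAllocations().Get(context.TODO(), "default", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("range %s, %d bitmap bytes\n", ra.Range, len(ra.Data))
}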
diff --git a/vendor/github.com/openshift/cluster-policy-controller/pkg/client/genericinformers/interface.go b/vendor/github.com/openshift/cluster-policy-controller/pkg/client/genericinformers/interface.go new file mode 100644 index 0000000000..5a7867d697 --- /dev/null +++ b/vendor/github.com/openshift/cluster-policy-controller/pkg/client/genericinformers/interface.go @@ -0,0 +1,80 @@ +package genericinformers + +import ( + "k8s.io/klog/v2" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/informers" +) + +type GenericResourceInformer interface { + ForResource(resource schema.GroupVersionResource) (informers.GenericInformer, error) + Start(stopCh <-chan struct{}) +} + +// GenericInternalResourceInformerFunc will return an internal informer for any resource matching +// its group resource, instead of the external version. Only valid for use where the type is accessed +// via generic interfaces, such as the garbage collector with ObjectMeta. +type GenericInternalResourceInformerFunc func(resource schema.GroupVersionResource) (informers.GenericInformer, error) + +func (fn GenericInternalResourceInformerFunc) ForResource(resource schema.GroupVersionResource) (informers.GenericInformer, error) { + resource.Version = runtime.APIVersionInternal + return fn(resource) +} + +// this is a temporary condition until we rewrite enough of generation to auto-conform to the required interface and no longer need the internal version shim +func (fn GenericInternalResourceInformerFunc) Start(stopCh <-chan struct{}) {} + +// GenericResourceInformerFunc will handle a cast to a matching type +type GenericResourceInformerFunc func(resource schema.GroupVersionResource) (informers.GenericInformer, error) + +func (fn GenericResourceInformerFunc) ForResource(resource schema.GroupVersionResource) (informers.GenericInformer, error) { + return fn(resource) +} + +// this is a temporary condition until we rewrite enough of generation to auto-conform to the required interface and no longer need the internal version shim +func (fn GenericResourceInformerFunc) Start(stopCh <-chan struct{}) {} + +type genericInformers struct { + // this is a temporary condition until we rewrite enough of generation to auto-conform to the required interface and no longer need the internal version shim + startFn func(stopCh <-chan struct{}) + generic []GenericResourceInformer + // bias is a map that tries loading an informer from another GVR before using the original + bias map[schema.GroupVersionResource]schema.GroupVersionResource +} + +func NewGenericInformers(startFn func(stopCh <-chan struct{}), informers ...GenericResourceInformer) genericInformers { + return genericInformers{ + startFn: startFn, + generic: informers, + } +} + +func (i genericInformers) ForResource(resource schema.GroupVersionResource) (informers.GenericInformer, error) { + if try, ok := i.bias[resource]; ok { + if res, err := i.ForResource(try); err == nil { + return res, nil + } + } + + var firstErr error + for _, generic := range i.generic { + informer, err := generic.ForResource(resource) + if err == nil { + return informer, nil + } + if firstErr == nil { + firstErr = err + } + } + klog.V(4).Infof("Couldn't find informer for %v", resource) + return nil, firstErr +} + +func (i genericInformers) Start(stopCh <-chan struct{}) { + i.startFn(stopCh) + for _, generic := range i.generic { + generic.Start(stopCh) + } +} diff --git a/vendor/github.com/openshift/cluster-policy-controller/pkg/cmd/cluster-policy-controller/cmd.go
b/vendor/github.com/openshift/cluster-policy-controller/pkg/cmd/cluster-policy-controller/cmd.go new file mode 100644 index 0000000000..0b63a3e7e4 --- /dev/null +++ b/vendor/github.com/openshift/cluster-policy-controller/pkg/cmd/cluster-policy-controller/cmd.go @@ -0,0 +1,32 @@ +package cluster_policy_controller + +import ( + "context" + "os" + + "github.com/spf13/cobra" + corev1 "k8s.io/api/core/v1" + + "github.com/openshift/library-go/pkg/controller/controllercmd" + + clusterpolicyversion "github.com/openshift/cluster-policy-controller/pkg/version" +) + +const ( + podNameEnv = "POD_NAME" + podNamespaceEnv = "POD_NAMESPACE" +) + +func NewClusterPolicyControllerCommand(name string) *cobra.Command { + cmd := controllercmd.NewControllerCommandConfig("cluster-policy-controller", clusterpolicyversion.Get(), RunClusterPolicyController). + WithComponentOwnerReference(&corev1.ObjectReference{ + Kind: "Pod", + Name: os.Getenv(podNameEnv), + Namespace: os.Getenv(podNamespaceEnv), + }). + NewCommandWithContext(context.Background()) + cmd.Use = name + cmd.Short = "Start the cluster policy controller" + + return cmd +} diff --git a/vendor/github.com/openshift/cluster-policy-controller/pkg/cmd/cluster-policy-controller/openshiftcontrolplane_config.go b/vendor/github.com/openshift/cluster-policy-controller/pkg/cmd/cluster-policy-controller/openshiftcontrolplane_config.go new file mode 100644 index 0000000000..e8dcea2728 --- /dev/null +++ b/vendor/github.com/openshift/cluster-policy-controller/pkg/cmd/cluster-policy-controller/openshiftcontrolplane_config.go @@ -0,0 +1,50 @@ +package cluster_policy_controller + +import ( + "time" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + + openshiftcontrolplanev1 "github.com/openshift/api/openshiftcontrolplane/v1" + "github.com/openshift/library-go/pkg/config/configdefaults" +) + +func asOpenshiftControllerManagerConfig(config *unstructured.Unstructured) (*openshiftcontrolplanev1.OpenShiftControllerManagerConfig, error) { + result := &openshiftcontrolplanev1.OpenShiftControllerManagerConfig{} + if config != nil { + // make a copy we can mutate + configCopy := config.DeepCopy() + // force the config to our version to read it + configCopy.SetGroupVersionKind(openshiftcontrolplanev1.GroupVersion.WithKind("OpenShiftControllerManagerConfig")) + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(configCopy.Object, result); err != nil { + return nil, err + } + } + + setRecommendedOpenShiftControllerConfigDefaults(result) + + return result, nil +} + +func setRecommendedOpenShiftControllerConfigDefaults(config *openshiftcontrolplanev1.OpenShiftControllerManagerConfig) { + configdefaults.SetRecommendedKubeClientConfigDefaults(&config.KubeClientConfig) + + configdefaults.DefaultStringSlice(&config.Controllers, []string{"*"}) + + configdefaults.DefaultString(&config.SecurityAllocator.UIDAllocatorRange, "1000000000-1999999999/10000") + configdefaults.DefaultString(&config.SecurityAllocator.MCSAllocatorRange, "s0:/2") + if config.SecurityAllocator.MCSLabelsPerProject == 0 { + config.SecurityAllocator.MCSLabelsPerProject = 5 + } + + if config.ResourceQuota.MinResyncPeriod.Duration == 0 { + config.ResourceQuota.MinResyncPeriod.Duration = 5 * time.Minute + } + if config.ResourceQuota.SyncPeriod.Duration == 0 { + config.ResourceQuota.SyncPeriod.Duration = 12 * time.Hour + } + if config.ResourceQuota.ConcurrentSyncs == 0 { + config.ResourceQuota.ConcurrentSyncs = 5 + } +} diff --git 
a/vendor/github.com/openshift/cluster-policy-controller/pkg/cmd/cluster-policy-controller/policy_controller.go b/vendor/github.com/openshift/cluster-policy-controller/pkg/cmd/cluster-policy-controller/policy_controller.go new file mode 100644 index 0000000000..14ae0efcc1 --- /dev/null +++ b/vendor/github.com/openshift/cluster-policy-controller/pkg/cmd/cluster-policy-controller/policy_controller.go @@ -0,0 +1,94 @@ +package cluster_policy_controller + +import ( + "context" + "fmt" + "net/http" + "time" + + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/klog/v2" + + origincontrollers "github.com/openshift/cluster-policy-controller/pkg/cmd/controller" + "github.com/openshift/library-go/pkg/controller/controllercmd" +) + +func RunClusterPolicyController(ctx context.Context, controllerContext *controllercmd.ControllerContext) error { + config, err := asOpenshiftControllerManagerConfig(controllerContext.ComponentConfig) + if err != nil { + return err + } + + kubeClient, err := kubernetes.NewForConfig(controllerContext.KubeConfig) + if err != nil { + return err + } + + if err := WaitForHealthyAPIServer(kubeClient.Discovery().RESTClient()); err != nil { + klog.Fatal(err) + } + + openshiftControllerManagerContext, err := origincontrollers.NewControllerContext(ctx, controllerContext, *config) + if err != nil { + klog.Fatal(err) + } + if err := startControllers(ctx, openshiftControllerManagerContext); err != nil { + klog.Fatal(err) + } + openshiftControllerManagerContext.StartInformers(ctx.Done()) + + <-ctx.Done() + return nil +} + +func WaitForHealthyAPIServer(client rest.Interface) error { + var healthzContent string + // If apiserver is not running we should wait for some time and fail only then. This is particularly + // important when we start apiserver and controller manager at the same time. + err := wait.PollImmediate(time.Second, 5*time.Minute, func() (bool, error) { + healthStatus := 0 + resp := client.Get().AbsPath("/healthz").Do(context.TODO()).StatusCode(&healthStatus) + if healthStatus != http.StatusOK { + klog.Errorf("Server isn't healthy yet. Waiting a little while.") + return false, nil + } + content, _ := resp.Raw() + healthzContent = string(content) + + return true, nil + }) + if err != nil { + return fmt.Errorf("server unhealthy: %v: %v", healthzContent, err) + } + + return nil +} + +// startControllers launches the controllers +// allocation controller is passed in because it wants direct etcd access. Naughty. 
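+// An init function that returns started=false is skipped with a warning; an init error currently aborts the whole process via klog.Fatalf, so the returned error never propagates.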
+func startControllers(ctx context.Context, controllerCtx *origincontrollers.EnhancedControllerContext) error { + for controllerName, initFn := range origincontrollers.ControllerInitializers { + if !controllerCtx.IsControllerEnabled(controllerName) { + klog.Warningf("%q is disabled", controllerName) + continue + } + + klog.V(1).Infof("Starting %q", controllerName) + started, err := initFn(ctx, controllerCtx) + if err != nil { + klog.Fatalf("Error starting %q (%v)", controllerName, err) + return err + } + if !started { + klog.Warningf("Skipping %q", controllerName) + continue + } + klog.Infof("Started %q", controllerName) + } + + klog.Infof("Started Origin Controllers") + + return nil +} diff --git a/vendor/github.com/openshift/cluster-policy-controller/pkg/cmd/controller/config.go b/vendor/github.com/openshift/cluster-policy-controller/pkg/cmd/controller/config.go new file mode 100644 index 0000000000..d2b8a5e4db --- /dev/null +++ b/vendor/github.com/openshift/cluster-policy-controller/pkg/cmd/controller/config.go @@ -0,0 +1,15 @@ +package controller + +var ControllerInitializers = map[string]InitFunc{ + "openshift.io/namespace-security-allocation": RunNamespaceSecurityAllocationController, + "openshift.io/resourcequota": RunResourceQuotaManager, + "openshift.io/cluster-quota-reconciliation": RunClusterQuotaReconciliationController, + "openshift.io/cluster-csr-approver": RunCSRApproverController, +} + +const ( + infraClusterQuotaReconciliationControllerServiceAccountName = "cluster-quota-reconciliation-controller" + infraClusterCSRApproverControllerServiceAccountName = "cluster-csr-approver-controller" + infraNamespaceSecurityAllocationControllerServiceAccountName = "namespace-security-allocation-controller" + defaultOpenShiftInfraNamespace = "openshift-infra" +) diff --git a/vendor/github.com/openshift/cluster-policy-controller/pkg/cmd/controller/csr.go b/vendor/github.com/openshift/cluster-policy-controller/pkg/cmd/controller/csr.go new file mode 100644 index 0000000000..8631831257 --- /dev/null +++ b/vendor/github.com/openshift/cluster-policy-controller/pkg/cmd/controller/csr.go @@ -0,0 +1,46 @@ +package controller + +import ( + "context" + + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/selection" + + "github.com/openshift/library-go/pkg/operator/csr" +) + +const ( + controllerName = "csr-approver-controller" + monitoringServiceAccountNamespace = "openshift-monitoring" + monitoringServiceAccountName = "cluster-monitoring-operator" + monitoringCertificateSubject = "CN=system:serviceaccount:openshift-monitoring:prometheus-k8s" + monitoringLabelKey = "metrics.openshift.io/csr.subject" + monitoringLabelValue = "prometheus" +) + +func RunCSRApproverController(ctx context.Context, controllerCtx *EnhancedControllerContext) (bool, error) { + kubeClient, err := controllerCtx.ClientBuilder.Client(infraClusterCSRApproverControllerServiceAccountName) + if err != nil { + return true, err + } + + selector := labels.NewSelector() + labelsRequirement, err := labels.NewRequirement(monitoringLabelKey, selection.Equals, []string{monitoringLabelValue}) + if err != nil { + return true, err + } + selector = selector.Add(*labelsRequirement) + + controller := csr.NewCSRApproverController( + controllerName, + nil, + kubeClient.CertificatesV1().CertificateSigningRequests(), + controllerCtx.KubernetesInformers.Certificates().V1().CertificateSigningRequests(), + csr.NewLabelFilter(selector), + csr.NewServiceAccountApprover(monitoringServiceAccountNamespace, monitoringServiceAccountName, 
monitoringCertificateSubject), + controllerCtx.EventRecorder) + + go controller.Run(ctx, 1) + + return true, nil +} diff --git a/vendor/github.com/openshift/cluster-policy-controller/pkg/cmd/controller/interfaces.go b/vendor/github.com/openshift/cluster-policy-controller/pkg/cmd/controller/interfaces.go new file mode 100644 index 0000000000..d595c1a037 --- /dev/null +++ b/vendor/github.com/openshift/cluster-policy-controller/pkg/cmd/controller/interfaces.go @@ -0,0 +1,425 @@ +package controller + +import ( + "context" + "sync" + "time" + + "k8s.io/klog/v2" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" + cacheddiscovery "k8s.io/client-go/discovery/cached" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/restmapper" + "k8s.io/controller-manager/app" + "k8s.io/controller-manager/pkg/clientbuilder" + + openshiftcontrolplanev1 "github.com/openshift/api/openshiftcontrolplane/v1" + appsclient "github.com/openshift/client-go/apps/clientset/versioned" + appsinformer "github.com/openshift/client-go/apps/informers/externalversions" + buildclient "github.com/openshift/client-go/build/clientset/versioned" + buildinformer "github.com/openshift/client-go/build/informers/externalversions" + configclient "github.com/openshift/client-go/config/clientset/versioned" + configinformer "github.com/openshift/client-go/config/informers/externalversions" + imageclient "github.com/openshift/client-go/image/clientset/versioned" + imageinformer "github.com/openshift/client-go/image/informers/externalversions" + operatorclient "github.com/openshift/client-go/operator/clientset/versioned" + operatorinformer "github.com/openshift/client-go/operator/informers/externalversions" + quotaclient "github.com/openshift/client-go/quota/clientset/versioned" + quotainformer "github.com/openshift/client-go/quota/informers/externalversions" + routeclient "github.com/openshift/client-go/route/clientset/versioned" + routeinformer "github.com/openshift/client-go/route/informers/externalversions" + securityclient "github.com/openshift/client-go/securityinternal/clientset/versioned" + templateclient "github.com/openshift/client-go/template/clientset/versioned" + templateinformer "github.com/openshift/client-go/template/informers/externalversions" + "github.com/openshift/library-go/pkg/controller/controllercmd" + + "github.com/openshift/cluster-policy-controller/pkg/client/genericinformers" +) + +func NewControllerContext( + ctx context.Context, + controllerContext *controllercmd.ControllerContext, + config openshiftcontrolplanev1.OpenShiftControllerManagerConfig, +) (*EnhancedControllerContext, error) { + inClientConfig := controllerContext.KubeConfig + + const defaultInformerResyncPeriod = 10 * time.Minute + kubeClient, err := kubernetes.NewForConfig(inClientConfig) + if err != nil { + return nil, err + } + + // copy to avoid messing with original + clientConfig := rest.CopyConfig(inClientConfig) + // divide up the QPS since it is re-used separately for every client + // TODO, eventually make this configurable individually in some way.
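+ // For example, an incoming rest.Config with QPS 50 and Burst 100 yields QPS 6 and Burst 11 for each derived client.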
+ if clientConfig.QPS > 0 { + clientConfig.QPS = clientConfig.QPS/10 + 1 + } + if clientConfig.Burst > 0 { + clientConfig.Burst = clientConfig.Burst/10 + 1 + } + + discoveryClient := cacheddiscovery.NewMemCacheClient(kubeClient.Discovery()) + dynamicRestMapper := restmapper.NewDeferredDiscoveryRESTMapper(discoveryClient) + dynamicRestMapper.Reset() + go wait.Until(dynamicRestMapper.Reset, 30*time.Second, ctx.Done()) + + appsClient, err := appsclient.NewForConfig(clientConfig) + if err != nil { + return nil, err + } + buildClient, err := buildclient.NewForConfig(clientConfig) + if err != nil { + return nil, err + } + configClient, err := configclient.NewForConfig(nonProtobufConfig(clientConfig)) + if err != nil { + return nil, err + } + imageClient, err := imageclient.NewForConfig(clientConfig) + if err != nil { + return nil, err + } + quotaClient, err := quotaclient.NewForConfig(nonProtobufConfig(clientConfig)) + if err != nil { + return nil, err + } + routerClient, err := routeclient.NewForConfig(clientConfig) + if err != nil { + return nil, err + } + templateClient, err := templateclient.NewForConfig(clientConfig) + if err != nil { + return nil, err + } + operatorClient, err := operatorclient.NewForConfig(clientConfig) + if err != nil { + return nil, err + } + + openshiftControllerContext := &EnhancedControllerContext{ + ControllerContext: controllerContext, + OpenshiftControllerConfig: config, + + ClientBuilder: OpenshiftControllerClientBuilder{ + ControllerClientBuilder: clientbuilder.NewDynamicClientBuilder(rest.AnonymousClientConfig(clientConfig), kubeClient.CoreV1(), defaultOpenShiftInfraNamespace), + }, + KubernetesInformers: informers.NewSharedInformerFactory(kubeClient, defaultInformerResyncPeriod), + OpenshiftConfigKubernetesInformers: informers.NewSharedInformerFactoryWithOptions(kubeClient, defaultInformerResyncPeriod, informers.WithNamespace("openshift-config")), + ControllerManagerKubeInformers: informers.NewSharedInformerFactoryWithOptions(kubeClient, defaultInformerResyncPeriod, informers.WithNamespace("openshift-controller-manager")), + AppsInformers: appsinformer.NewSharedInformerFactory(appsClient, defaultInformerResyncPeriod), + BuildInformers: buildinformer.NewSharedInformerFactory(buildClient, defaultInformerResyncPeriod), + ConfigInformers: configinformer.NewSharedInformerFactory(configClient, defaultInformerResyncPeriod), + ImageInformers: imageinformer.NewSharedInformerFactory(imageClient, defaultInformerResyncPeriod), + OperatorInformers: operatorinformer.NewSharedInformerFactory(operatorClient, defaultInformerResyncPeriod), + QuotaInformers: quotainformer.NewSharedInformerFactory(quotaClient, defaultInformerResyncPeriod), + RouteInformers: routeinformer.NewSharedInformerFactory(routerClient, defaultInformerResyncPeriod), + TemplateInformers: templateinformer.NewSharedInformerFactory(templateClient, defaultInformerResyncPeriod), + InformersStarted: make(chan struct{}), + RestMapper: dynamicRestMapper, + } + openshiftControllerContext.GenericResourceInformer = openshiftControllerContext.ToGenericInformer() + + return openshiftControllerContext, nil +} + +func (c *EnhancedControllerContext) ToGenericInformer() genericinformers.GenericResourceInformer { + return genericinformers.NewGenericInformers( + c.StartInformers, + c.KubernetesInformers, + genericinformers.GenericResourceInformerFunc(func(resource schema.GroupVersionResource) (informers.GenericInformer, error) { + return c.AppsInformers.ForResource(resource) + }), + 
genericinformers.GenericResourceInformerFunc(func(resource schema.GroupVersionResource) (informers.GenericInformer, error) { + return c.BuildInformers.ForResource(resource) + }), + genericinformers.GenericResourceInformerFunc(func(resource schema.GroupVersionResource) (informers.GenericInformer, error) { + return c.ConfigInformers.ForResource(resource) + }), + genericinformers.GenericResourceInformerFunc(func(resource schema.GroupVersionResource) (informers.GenericInformer, error) { + return c.ImageInformers.ForResource(resource) + }), + genericinformers.GenericInternalResourceInformerFunc(func(resource schema.GroupVersionResource) (informers.GenericInformer, error) { + return c.QuotaInformers.ForResource(resource) + }), + genericinformers.GenericResourceInformerFunc(func(resource schema.GroupVersionResource) (informers.GenericInformer, error) { + return c.RouteInformers.ForResource(resource) + }), + genericinformers.GenericInternalResourceInformerFunc(func(resource schema.GroupVersionResource) (informers.GenericInformer, error) { + return c.TemplateInformers.ForResource(resource) + }), + ) +} + +type EnhancedControllerContext struct { + *controllercmd.ControllerContext + OpenshiftControllerConfig openshiftcontrolplanev1.OpenShiftControllerManagerConfig + + // ClientBuilder will provide a client for this controller to use + ClientBuilder ControllerClientBuilder + + KubernetesInformers informers.SharedInformerFactory + OpenshiftConfigKubernetesInformers informers.SharedInformerFactory + ControllerManagerKubeInformers informers.SharedInformerFactory + + TemplateInformers templateinformer.SharedInformerFactory + QuotaInformers quotainformer.SharedInformerFactory + RouteInformers routeinformer.SharedInformerFactory + + AppsInformers appsinformer.SharedInformerFactory + BuildInformers buildinformer.SharedInformerFactory + ConfigInformers configinformer.SharedInformerFactory + ImageInformers imageinformer.SharedInformerFactory + OperatorInformers operatorinformer.SharedInformerFactory + + GenericResourceInformer genericinformers.GenericResourceInformer + RestMapper meta.RESTMapper + + informersStartedLock sync.Mutex + informersStartedClosed bool + // InformersStarted is closed after all of the controllers have been initialized and are running. After this point it is safe + // for an individual controller to start the shared informers. Before it is closed, they should not.
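+ // This mirrors the InformersStarted channel used for the same purpose in the upstream kube-controller-manager's controller context.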
+ InformersStarted chan struct{} +} + +func (c *EnhancedControllerContext) StartInformers(stopCh <-chan struct{}) { + c.KubernetesInformers.Start(stopCh) + c.OpenshiftConfigKubernetesInformers.Start(stopCh) + c.ControllerManagerKubeInformers.Start(stopCh) + + c.AppsInformers.Start(stopCh) + c.BuildInformers.Start(stopCh) + c.ConfigInformers.Start(stopCh) + c.ImageInformers.Start(stopCh) + + c.TemplateInformers.Start(stopCh) + c.QuotaInformers.Start(stopCh) + c.RouteInformers.Start(stopCh) + c.OperatorInformers.Start(stopCh) + + c.informersStartedLock.Lock() + defer c.informersStartedLock.Unlock() + if !c.informersStartedClosed { + close(c.InformersStarted) + c.informersStartedClosed = true + } +} + +func (c *EnhancedControllerContext) IsControllerEnabled(name string) bool { + return app.IsControllerEnabled(name, sets.String{}, c.OpenshiftControllerConfig.Controllers) +} + +type ControllerClientBuilder interface { + clientbuilder.ControllerClientBuilder + + OpenshiftAppsClient(name string) (appsclient.Interface, error) + OpenshiftAppsClientOrDie(name string) appsclient.Interface + + OpenshiftBuildClient(name string) (buildclient.Interface, error) + OpenshiftBuildClientOrDie(name string) buildclient.Interface + + OpenshiftConfigClient(name string) (configclient.Interface, error) + OpenshiftConfigClientOrDie(name string) configclient.Interface + + OpenshiftSecurityClient(name string) (securityclient.Interface, error) + OpenshiftSecurityClientOrDie(name string) securityclient.Interface + + // OpenShift clients based on generated clientsets + OpenshiftTemplateClient(name string) (templateclient.Interface, error) + OpenshiftTemplateClientOrDie(name string) templateclient.Interface + + OpenshiftImageClient(name string) (imageclient.Interface, error) + OpenshiftImageClientOrDie(name string) imageclient.Interface + + OpenshiftQuotaClient(name string) (quotaclient.Interface, error) + OpenshiftQuotaClientOrDie(name string) quotaclient.Interface + + OpenshiftOperatorClient(name string) (operatorclient.Interface, error) + OpenshiftOperatorClientOrDie(name string) operatorclient.Interface +} + +// InitFunc is used to launch a particular controller. It may run additional "should I activate" checks. +// Any error returned will cause the controller process to `Fatal`. +// The bool indicates whether the controller was enabled. +type InitFunc func(ctx context.Context, controllerCtx *EnhancedControllerContext) (bool, error) + +type OpenshiftControllerClientBuilder struct { + clientbuilder.ControllerClientBuilder +} + +func (b OpenshiftControllerClientBuilder) OpenshiftOperatorClient(name string) (operatorclient.Interface, error) { + clientConfig, err := b.Config(name) + if err != nil { + return nil, err + } + return operatorclient.NewForConfig(clientConfig) +} + +func (b OpenshiftControllerClientBuilder) OpenshiftOperatorClientOrDie(name string) operatorclient.Interface { + client, err := b.OpenshiftOperatorClient(name) + if err != nil { + klog.Fatal(err) + } + return client +} + +// OpenshiftTemplateClient provides a REST client for the template API. +// If the client cannot be created because of configuration error, this function +// will return an error. +func (b OpenshiftControllerClientBuilder) OpenshiftTemplateClient(name string) (templateclient.Interface, error) { + clientConfig, err := b.Config(name) + if err != nil { + return nil, err + } + return templateclient.NewForConfig(clientConfig) +} + +// OpenshiftTemplateClientOrDie provides a REST client for the template API.
+// If the client cannot be created because of configuration error, this function +// will panic. +func (b OpenshiftControllerClientBuilder) OpenshiftTemplateClientOrDie(name string) templateclient.Interface { + client, err := b.OpenshiftTemplateClient(name) + if err != nil { + klog.Fatal(err) + } + return client +} + +// OpenshiftImageClient provides a REST client for the image API. +// If the client cannot be created because of configuration error, this function +// will error. +func (b OpenshiftControllerClientBuilder) OpenshiftImageClient(name string) (imageclient.Interface, error) { + clientConfig, err := b.Config(name) + if err != nil { + return nil, err + } + return imageclient.NewForConfig(clientConfig) +} + +// OpenshiftImageClientOrDie provides a REST client for the image API. +// If the client cannot be created because of configuration error, this function +// will panic. +func (b OpenshiftControllerClientBuilder) OpenshiftImageClientOrDie(name string) imageclient.Interface { + client, err := b.OpenshiftImageClient(name) + if err != nil { + klog.Fatal(err) + } + return client +} + +// OpenshiftAppsClient provides a REST client for the apps API. +// If the client cannot be created because of configuration error, this function +// will error. +func (b OpenshiftControllerClientBuilder) OpenshiftAppsClient(name string) (appsclient.Interface, error) { + clientConfig, err := b.Config(name) + if err != nil { + return nil, err + } + return appsclient.NewForConfig(clientConfig) +} + +// OpenshiftAppsClientOrDie provides a REST client for the apps API. +// If the client cannot be created because of configuration error, this function +// will panic. +func (b OpenshiftControllerClientBuilder) OpenshiftAppsClientOrDie(name string) appsclient.Interface { + client, err := b.OpenshiftAppsClient(name) + if err != nil { + klog.Fatal(err) + } + return client +} + +// OpenshiftBuildClient provides a REST client for the build API. +// If the client cannot be created because of configuration error, this function +// will error. +func (b OpenshiftControllerClientBuilder) OpenshiftBuildClient(name string) (buildclient.Interface, error) { + clientConfig, err := b.Config(name) + if err != nil { + return nil, err + } + return buildclient.NewForConfig(clientConfig) +} + +// OpenshiftBuildClientOrDie provides a REST client for the build API. +// If the client cannot be created because of configuration error, this function +// will panic. +func (b OpenshiftControllerClientBuilder) OpenshiftBuildClientOrDie(name string) buildclient.Interface { + client, err := b.OpenshiftBuildClient(name) + if err != nil { + klog.Fatal(err) + } + return client +} + +// OpenshiftConfigClient provides a REST client for the config API. +// If the client cannot be created because of configuration error, this function +// will error. +func (b OpenshiftControllerClientBuilder) OpenshiftConfigClient(name string) (configclient.Interface, error) { + clientConfig, err := b.Config(name) + if err != nil { + return nil, err + } + return configclient.NewForConfig(nonProtobufConfig(clientConfig)) +} + +// OpenshiftConfigClientOrDie provides a REST client for the config API. +// If the client cannot be created because of configuration error, this function +// will panic.
+func (b OpenshiftControllerClientBuilder) OpenshiftConfigClientOrDie(name string) configclient.Interface {
+	client, err := b.OpenshiftConfigClient(name)
+	if err != nil {
+		klog.Fatal(err)
+	}
+	return client
+}
+
+func (b OpenshiftControllerClientBuilder) OpenshiftQuotaClient(name string) (quotaclient.Interface, error) {
+	clientConfig, err := b.Config(name)
+	if err != nil {
+		return nil, err
+	}
+	return quotaclient.NewForConfig(nonProtobufConfig(clientConfig))
+}
+
+// OpenshiftQuotaClientOrDie provides a REST client for the quota API.
+// If the client cannot be created because of configuration error, this function
+// will panic.
+func (b OpenshiftControllerClientBuilder) OpenshiftQuotaClientOrDie(name string) quotaclient.Interface {
+	client, err := b.OpenshiftQuotaClient(name)
+	if err != nil {
+		klog.Fatal(err)
+	}
+	return client
+}
+
+func (b OpenshiftControllerClientBuilder) OpenshiftSecurityClient(name string) (securityclient.Interface, error) {
+	clientConfig, err := b.Config(name)
+	if err != nil {
+		return nil, err
+	}
+	return securityclient.NewForConfig(nonProtobufConfig(clientConfig))
+}
+
+func (b OpenshiftControllerClientBuilder) OpenshiftSecurityClientOrDie(name string) securityclient.Interface {
+	client, err := b.OpenshiftSecurityClient(name)
+	if err != nil {
+		klog.Fatal(err)
+	}
+	return client
+}
+
+// nonProtobufConfig returns a copy of inConfig that doesn't force the use of protobufs,
+// for working with CRD-based APIs.
+func nonProtobufConfig(inConfig *rest.Config) *rest.Config {
+	npConfig := rest.CopyConfig(inConfig)
+	npConfig.ContentConfig.AcceptContentTypes = "application/json"
+	npConfig.ContentConfig.ContentType = "application/json"
+	return npConfig
+}
diff --git a/vendor/github.com/openshift/cluster-policy-controller/pkg/cmd/controller/quota.go b/vendor/github.com/openshift/cluster-policy-controller/pkg/cmd/controller/quota.go
new file mode 100644
index 0000000000..d61e1e5434
--- /dev/null
+++ b/vendor/github.com/openshift/cluster-policy-controller/pkg/cmd/controller/quota.go
@@ -0,0 +1,140 @@
+package controller
+
+import (
+	"context"
+	"math/rand"
+	"time"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	kquota "k8s.io/apiserver/pkg/quota/v1"
+	"k8s.io/apiserver/pkg/quota/v1/generic"
+	"k8s.io/kubernetes/pkg/controller"
+	kresourcequota "k8s.io/kubernetes/pkg/controller/resourcequota"
+	quotainstall "k8s.io/kubernetes/pkg/quota/v1/install"
+
+	"github.com/openshift/library-go/pkg/quota/clusterquotamapping"
+
+	"github.com/openshift/cluster-policy-controller/pkg/quota/clusterquotareconciliation"
+	image "github.com/openshift/cluster-policy-controller/pkg/quota/quotaimageexternal"
+)
+
+func RunResourceQuotaManager(ctx context.Context, controllerCtx *EnhancedControllerContext) (bool, error) {
+	concurrentResourceQuotaSyncs := int(controllerCtx.OpenshiftControllerConfig.ResourceQuota.ConcurrentSyncs)
+	resourceQuotaSyncPeriod := controllerCtx.OpenshiftControllerConfig.ResourceQuota.SyncPeriod.Duration
+	replenishmentSyncPeriodFunc := calculateResyncPeriod(controllerCtx.OpenshiftControllerConfig.ResourceQuota.MinResyncPeriod.Duration)
+	saName := "resourcequota-controller"
+	listerFuncForResource := generic.ListerFuncForResourceFunc(controllerCtx.GenericResourceInformer.ForResource)
+	quotaConfiguration := quotainstall.NewQuotaConfigurationForControllers(listerFuncForResource)
+	resourceQuotaControllerClient := controllerCtx.ClientBuilder.ClientOrDie(saName)
+	imageEvaluators := image.NewReplenishmentEvaluators(
+		listerFuncForResource,
+		controllerCtx.ImageInformers.Image().V1().ImageStreams(),
+		controllerCtx.ClientBuilder.OpenshiftImageClientOrDie(saName).ImageV1())
+	resourceQuotaRegistry := generic.NewRegistry(imageEvaluators)
+	discoveryFunc := resourceQuotaDiscoveryWrapper(resourceQuotaRegistry, resourceQuotaControllerClient.Discovery().ServerPreferredNamespacedResources)
+
+	resourceQuotaControllerOptions := &kresourcequota.ControllerOptions{
+		QuotaClient:               resourceQuotaControllerClient.CoreV1(),
+		ResourceQuotaInformer:     controllerCtx.KubernetesInformers.Core().V1().ResourceQuotas(),
+		ResyncPeriod:              controller.StaticResyncPeriodFunc(resourceQuotaSyncPeriod),
+		Registry:                  resourceQuotaRegistry,
+		ReplenishmentResyncPeriod: replenishmentSyncPeriodFunc,
+		IgnoredResourcesFunc:      quotaConfiguration.IgnoredResources,
+		InformersStarted:          controllerCtx.InformersStarted,
+		InformerFactory:           controllerCtx.GenericResourceInformer,
+		DiscoveryFunc:             discoveryFunc,
+	}
+	ctrl, err := kresourcequota.NewController(resourceQuotaControllerOptions)
+	if err != nil {
+		return true, err
+	}
+	go ctrl.Run(concurrentResourceQuotaSyncs, ctx.Done())
+	go ctrl.Sync(discoveryFunc, 30*time.Second, ctx.Done())
+
+	return true, nil
+}
+
+func resourceQuotaDiscoveryWrapper(registry kquota.Registry, discoveryFunc kresourcequota.NamespacedResourcesFunc) kresourcequota.NamespacedResourcesFunc {
+	return func() ([]*metav1.APIResourceList, error) {
+		discoveryResources, discoveryErr := discoveryFunc()
+		if discoveryErr != nil && len(discoveryResources) == 0 {
+			return nil, discoveryErr
+		}
+
+		interestingResources := []*metav1.APIResourceList{}
+		for _, resourceList := range discoveryResources {
+			gv, err := schema.ParseGroupVersion(resourceList.GroupVersion)
+			if err != nil {
+				return nil, err
+			}
+			for i := range resourceList.APIResources {
+				gr := schema.GroupResource{
+					Group:    gv.Group,
+					Resource: resourceList.APIResources[i].Name,
+				}
+				if evaluator := registry.Get(gr); evaluator != nil {
+					interestingResources = append(interestingResources, resourceList)
+				}
+			}
+		}
+		return interestingResources, nil
+	}
+}
+
+func RunClusterQuotaReconciliationController(ctx context.Context, controllerCtx *EnhancedControllerContext) (bool, error) {
+	defaultResyncPeriod := 5 * time.Minute
+	defaultReplenishmentSyncPeriod := 12 * time.Hour
+
+	saName := infraClusterQuotaReconciliationControllerServiceAccountName
+
+	clusterQuotaMappingController := clusterquotamapping.NewClusterQuotaMappingController(
+		controllerCtx.KubernetesInformers.Core().V1().Namespaces(),
+		controllerCtx.QuotaInformers.Quota().V1().ClusterResourceQuotas())
+	resourceQuotaControllerClient := controllerCtx.ClientBuilder.ClientOrDie("resourcequota-controller")
+	discoveryFunc := resourceQuotaControllerClient.Discovery().ServerPreferredNamespacedResources
+	listerFuncForResource := generic.ListerFuncForResourceFunc(controllerCtx.GenericResourceInformer.ForResource)
+	quotaConfiguration := quotainstall.NewQuotaConfigurationForControllers(listerFuncForResource)
+
+	// TODO make a union registry
+	resourceQuotaRegistry := generic.NewRegistry(quotaConfiguration.Evaluators())
+	imageEvaluators := image.NewReplenishmentEvaluators(
+		listerFuncForResource,
+		controllerCtx.ImageInformers.Image().V1().ImageStreams(),
+		controllerCtx.ClientBuilder.OpenshiftImageClientOrDie(saName).ImageV1())
+	for i := range imageEvaluators {
+		resourceQuotaRegistry.Add(imageEvaluators[i])
+	}
+
+	options :=
clusterquotareconciliation.ClusterQuotaReconcilationControllerOptions{ + ClusterQuotaInformer: controllerCtx.QuotaInformers.Quota().V1().ClusterResourceQuotas(), + ClusterQuotaMapper: clusterQuotaMappingController.GetClusterQuotaMapper(), + ClusterQuotaClient: controllerCtx.ClientBuilder.OpenshiftQuotaClientOrDie(saName).QuotaV1().ClusterResourceQuotas(), + + Registry: resourceQuotaRegistry, + ResyncPeriod: defaultResyncPeriod, + ReplenishmentResyncPeriod: controller.StaticResyncPeriodFunc(defaultReplenishmentSyncPeriod), + DiscoveryFunc: discoveryFunc, + IgnoredResourcesFunc: quotaConfiguration.IgnoredResources, + InformersStarted: controllerCtx.InformersStarted, + InformerFactory: controllerCtx.GenericResourceInformer, + } + clusterQuotaReconciliationController, err := clusterquotareconciliation.NewClusterQuotaReconcilationController(options) + if err != nil { + return true, err + } + clusterQuotaMappingController.GetClusterQuotaMapper().AddListener(clusterQuotaReconciliationController) + + go clusterQuotaMappingController.Run(5, ctx.Done()) + go clusterQuotaReconciliationController.Run(5, ctx.Done()) + go clusterQuotaReconciliationController.Sync(discoveryFunc, 30*time.Second, ctx.Done()) + + return true, nil +} + +func calculateResyncPeriod(period time.Duration) func() time.Duration { + return func() time.Duration { + factor := rand.Float64() + 1 + return time.Duration(float64(period.Nanoseconds()) * factor) + } +} diff --git a/vendor/github.com/openshift/cluster-policy-controller/pkg/cmd/controller/security.go b/vendor/github.com/openshift/cluster-policy-controller/pkg/cmd/controller/security.go new file mode 100644 index 0000000000..45e228d9fd --- /dev/null +++ b/vendor/github.com/openshift/cluster-policy-controller/pkg/cmd/controller/security.go @@ -0,0 +1,43 @@ +package controller + +import ( + "context" + "fmt" + + "github.com/openshift/library-go/pkg/security/uid" + + sccallocation "github.com/openshift/cluster-policy-controller/pkg/security/controller" + "github.com/openshift/cluster-policy-controller/pkg/security/mcs" +) + +func RunNamespaceSecurityAllocationController(ctx context.Context, controllerCtx *EnhancedControllerContext) (bool, error) { + uidRange, err := uid.ParseRange(controllerCtx.OpenshiftControllerConfig.SecurityAllocator.UIDAllocatorRange) + if err != nil { + return true, fmt.Errorf("unable to describe UID range: %v", err) + } + mcsRange, err := mcs.ParseRange(controllerCtx.OpenshiftControllerConfig.SecurityAllocator.MCSAllocatorRange) + if err != nil { + return true, fmt.Errorf("unable to describe MCS category range: %v", err) + } + + kubeClient, err := controllerCtx.ClientBuilder.Client(infraNamespaceSecurityAllocationControllerServiceAccountName) + if err != nil { + return true, err + } + securityClient, err := controllerCtx.ClientBuilder.OpenshiftSecurityClient(infraNamespaceSecurityAllocationControllerServiceAccountName) + if err != nil { + return true, err + } + + controller := sccallocation.NewNamespaceSCCAllocationController( + controllerCtx.KubernetesInformers.Core().V1().Namespaces(), + kubeClient.CoreV1().Namespaces(), + securityClient.SecurityV1(), + uidRange, + sccallocation.DefaultMCSAllocation(uidRange, mcsRange, controllerCtx.OpenshiftControllerConfig.SecurityAllocator.MCSLabelsPerProject), + controllerCtx.EventRecorder, + ) + go controller.Run(ctx, 1) + + return true, nil +} diff --git a/vendor/github.com/openshift/cluster-policy-controller/pkg/quota/clusterquotareconciliation/reconciliation_controller.go 
b/vendor/github.com/openshift/cluster-policy-controller/pkg/quota/clusterquotareconciliation/reconciliation_controller.go
new file mode 100644
index 0000000000..85780c6e96
--- /dev/null
+++ b/vendor/github.com/openshift/cluster-policy-controller/pkg/quota/clusterquotareconciliation/reconciliation_controller.go
@@ -0,0 +1,457 @@
+package clusterquotareconciliation
+
+import (
+	"context"
+	"fmt"
+	"reflect"
+	"sync"
+	"time"
+
+	"k8s.io/klog/v2"
+
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/equality"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	kutilerrors "k8s.io/apimachinery/pkg/util/errors"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/apimachinery/pkg/util/wait"
+	utilquota "k8s.io/apiserver/pkg/quota/v1"
+	"k8s.io/client-go/discovery"
+	"k8s.io/client-go/tools/cache"
+	"k8s.io/controller-manager/pkg/informerfactory"
+	"k8s.io/kubernetes/pkg/controller"
+	"k8s.io/kubernetes/pkg/controller/resourcequota"
+
+	quotav1 "github.com/openshift/api/quota/v1"
+	quotatypedclient "github.com/openshift/client-go/quota/clientset/versioned/typed/quota/v1"
+	quotainformer "github.com/openshift/client-go/quota/informers/externalversions/quota/v1"
+	quotalister "github.com/openshift/client-go/quota/listers/quota/v1"
+	"github.com/openshift/library-go/pkg/quota/clusterquotamapping"
+	quotautil "github.com/openshift/library-go/pkg/quota/quotautil"
+)
+
+type ClusterQuotaReconcilationControllerOptions struct {
+	ClusterQuotaInformer quotainformer.ClusterResourceQuotaInformer
+	ClusterQuotaMapper   clusterquotamapping.ClusterQuotaMapper
+	ClusterQuotaClient   quotatypedclient.ClusterResourceQuotaInterface
+
+	// Knows how to calculate usage
+	Registry utilquota.Registry
+	// Controls full recalculation of quota usage
+	ResyncPeriod time.Duration
+	// Discovers the list of supported resources on the server.
+	DiscoveryFunc resourcequota.NamespacedResourcesFunc
+	// A function that returns the list of resources to ignore
+	IgnoredResourcesFunc func() map[schema.GroupResource]struct{}
+	// InformersStarted knows if informers were started.
+	InformersStarted <-chan struct{}
+	// InformerFactory interfaces with informers.
+	InformerFactory informerfactory.InformerFactory
+	// Controls full resync of objects monitored for replenishment.
+	ReplenishmentResyncPeriod controller.ResyncPeriodFunc
+}
+
+type ClusterQuotaReconcilationController struct {
+	clusterQuotaLister quotalister.ClusterResourceQuotaLister
+	clusterQuotaMapper clusterquotamapping.ClusterQuotaMapper
+	clusterQuotaClient quotatypedclient.ClusterResourceQuotaInterface
+	// A list of functions that return true when their caches have synced
+	informerSyncedFuncs []cache.InformerSynced
+
+	resyncPeriod time.Duration
+
+	// queue tracks which clusterquotas to update along with a list of namespaces for that clusterquota
+	queue BucketingWorkQueue
+
+	// knows how to calculate usage
+	registry utilquota.Registry
+	// knows how to monitor all the resources tracked by quota and trigger replenishment
+	quotaMonitor *resourcequota.QuotaMonitor
+	// controls the workers that process quotas
+	// this lock is acquired to control write access to the monitors and ensures that all
+	// monitors are synced before the controller can process quotas.
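+	// (workers take this as a read lock in worker(); Sync takes the write lock
+	// so all in-flight workers drain before the monitors are resynced)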
+ workerLock sync.RWMutex +} + +type workItem struct { + namespaceName string + forceRecalculation bool +} + +func NewClusterQuotaReconcilationController(options ClusterQuotaReconcilationControllerOptions) (*ClusterQuotaReconcilationController, error) { + c := &ClusterQuotaReconcilationController{ + clusterQuotaLister: options.ClusterQuotaInformer.Lister(), + clusterQuotaMapper: options.ClusterQuotaMapper, + clusterQuotaClient: options.ClusterQuotaClient, + informerSyncedFuncs: []cache.InformerSynced{options.ClusterQuotaInformer.Informer().HasSynced}, + + resyncPeriod: options.ResyncPeriod, + registry: options.Registry, + + queue: NewBucketingWorkQueue("controller_clusterquotareconcilationcontroller"), + } + + // we need to trigger every time + options.ClusterQuotaInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: c.addClusterQuota, + UpdateFunc: c.updateClusterQuota, + }) + + qm := resourcequota.NewMonitor( + options.InformersStarted, + options.InformerFactory, + options.IgnoredResourcesFunc(), + options.ReplenishmentResyncPeriod, + c.replenishQuota, + c.registry, + ) + + c.quotaMonitor = qm + + // do initial quota monitor setup. If we have a discovery failure here, it's ok. We'll discover more resources when a later sync happens. + resources, err := resourcequota.GetQuotableResources(options.DiscoveryFunc) + if discovery.IsGroupDiscoveryFailedError(err) { + utilruntime.HandleError(fmt.Errorf("initial discovery check failure, continuing and counting on future sync update: %v", err)) + } else if err != nil { + return nil, err + } + + if err = qm.SyncMonitors(resources); err != nil { + utilruntime.HandleError(fmt.Errorf("initial monitor sync has error: %v", err)) + } + + // only start quota once all informers synced + c.informerSyncedFuncs = append(c.informerSyncedFuncs, qm.IsSynced) + + return c, nil +} + +// Run begins quota controller using the specified number of workers +func (c *ClusterQuotaReconcilationController) Run(workers int, stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + + klog.Infof("Starting the cluster quota reconciliation controller") + + // the controllers that replenish other resources to respond rapidly to state changes + go c.quotaMonitor.Run(stopCh) + + if !cache.WaitForCacheSync(stopCh, c.informerSyncedFuncs...) { + return + } + + // the workers that chug through the quota calculation backlog + for i := 0; i < workers; i++ { + go wait.Until(c.worker, time.Second, stopCh) + } + + // the timer for how often we do a full recalculation across all quotas + go wait.Until(func() { c.calculateAll() }, c.resyncPeriod, stopCh) + + <-stopCh + klog.Infof("Shutting down ClusterQuotaReconcilationController") + c.queue.ShutDown() +} + +// Sync periodically resyncs the controller when new resources are observed from discovery. +func (c *ClusterQuotaReconcilationController) Sync(discoveryFunc resourcequota.NamespacedResourcesFunc, period time.Duration, stopCh <-chan struct{}) { + // Something has changed, so track the new state and perform a sync. + oldResources := make(map[schema.GroupVersionResource]struct{}) + wait.Until(func() { + // Get the current resource list from discovery. 
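+		// (GetQuotableResources narrows the discovery results down to the
+		// resource types the quota system is able to track)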
+ newResources, err := resourcequota.GetQuotableResources(discoveryFunc) + if err != nil { + utilruntime.HandleError(err) + + if discovery.IsGroupDiscoveryFailedError(err) && len(newResources) > 0 { + // In partial discovery cases, don't remove any existing informers, just add new ones + for k, v := range oldResources { + newResources[k] = v + } + } else { + // short circuit in non-discovery error cases or if discovery returned zero resources + return + } + } + + // Decide whether discovery has reported a change. + if reflect.DeepEqual(oldResources, newResources) { + klog.V(4).Infof("no resource updates from discovery, skipping resource quota sync") + return + } + + // Something has changed, so track the new state and perform a sync. + klog.V(2).Infof("syncing resource quota controller with updated resources from discovery: %v", newResources) + oldResources = newResources + + // Ensure workers are paused to avoid processing events before informers + // have resynced. + c.workerLock.Lock() + defer c.workerLock.Unlock() + + // Perform the monitor resync and wait for controllers to report cache sync. + if err := c.resyncMonitors(newResources); err != nil { + utilruntime.HandleError(fmt.Errorf("failed to sync resource monitors: %v", err)) + return + } + if c.quotaMonitor != nil && !cache.WaitForCacheSync(stopCh, c.quotaMonitor.IsSynced) { + utilruntime.HandleError(fmt.Errorf("timed out waiting for quota monitor sync")) + } + }, period, stopCh) +} + +// resyncMonitors starts or stops quota monitors as needed to ensure that all +// (and only) those resources present in the map are monitored. +func (c *ClusterQuotaReconcilationController) resyncMonitors(resources map[schema.GroupVersionResource]struct{}) error { + // SyncMonitors can only fail if there was no Informer for the given gvr + err := c.quotaMonitor.SyncMonitors(resources) + // this is no-op for already running monitors + c.quotaMonitor.StartMonitors() + return err +} + +func (c *ClusterQuotaReconcilationController) calculate(quotaName string, namespaceNames ...string) { + if len(namespaceNames) == 0 { + return + } + items := make([]interface{}, 0, len(namespaceNames)) + for _, name := range namespaceNames { + items = append(items, workItem{namespaceName: name, forceRecalculation: false}) + } + + c.queue.AddWithData(quotaName, items...) +} + +func (c *ClusterQuotaReconcilationController) forceCalculation(quotaName string, namespaceNames ...string) { + if len(namespaceNames) == 0 { + return + } + items := make([]interface{}, 0, len(namespaceNames)) + for _, name := range namespaceNames { + items = append(items, workItem{namespaceName: name, forceRecalculation: true}) + } + + c.queue.AddWithData(quotaName, items...) +} + +func (c *ClusterQuotaReconcilationController) calculateAll() { + quotas, err := c.clusterQuotaLister.List(labels.Everything()) + if err != nil { + utilruntime.HandleError(err) + return + } + + for _, quota := range quotas { + // If we have namespaces we map to, force calculating those namespaces + namespaces, _ := c.clusterQuotaMapper.GetNamespacesFor(quota.Name) + if len(namespaces) > 0 { + c.forceCalculation(quota.Name, namespaces...) + continue + } + + // If the quota status has namespaces when our mapper doesn't think it should, + // add it directly to the queue without any work items + if len(quota.Status.Namespaces) > 0 { + c.queue.AddWithData(quota.Name) + continue + } + } +} + +// worker runs a worker thread that just dequeues items, processes them, and marks them done. 
+// It enforces that the syncHandler is never invoked concurrently with the same key.
+func (c *ClusterQuotaReconcilationController) worker() {
+	workFunc := func() bool {
+		uncastKey, uncastData, quit := c.queue.GetWithData()
+		if quit {
+			return true
+		}
+		defer c.queue.Done(uncastKey)
+
+		c.workerLock.RLock()
+		defer c.workerLock.RUnlock()
+
+		quotaName := uncastKey.(string)
+		quota, err := c.clusterQuotaLister.Get(quotaName)
+		if apierrors.IsNotFound(err) {
+			c.queue.Forget(uncastKey)
+			return false
+		}
+		if err != nil {
+			utilruntime.HandleError(err)
+			c.queue.AddWithDataRateLimited(uncastKey, uncastData...)
+			return false
+		}
+
+		workItems := make([]workItem, 0, len(uncastData))
+		for _, dataElement := range uncastData {
+			workItems = append(workItems, dataElement.(workItem))
+		}
+		err, retryItems := c.syncQuotaForNamespaces(quota, workItems)
+		if err == nil {
+			c.queue.Forget(uncastKey)
+			return false
+		}
+		utilruntime.HandleError(err)
+
+		items := make([]interface{}, 0, len(retryItems))
+		for _, item := range retryItems {
+			items = append(items, item)
+		}
+		c.queue.AddWithDataRateLimited(uncastKey, items...)
+		return false
+	}
+
+	for {
+		if quit := workFunc(); quit {
+			klog.Infof("resource quota controller worker shutting down")
+			return
+		}
+	}
+}
+
+// syncQuotaForNamespaces syncs the given work items against a single cluster quota
+func (c *ClusterQuotaReconcilationController) syncQuotaForNamespaces(originalQuota *quotav1.ClusterResourceQuota, workItems []workItem) (error, []workItem /* to retry */) {
+	quota := originalQuota.DeepCopy()
+
+	// get the list of namespaces that match this cluster quota
+	matchingNamespaceNamesList, quotaSelector := c.clusterQuotaMapper.GetNamespacesFor(quota.Name)
+	if !equality.Semantic.DeepEqual(quotaSelector, quota.Spec.Selector) {
+		return fmt.Errorf("mapping not up to date, have=%v need=%v", quotaSelector, quota.Spec.Selector), workItems
+	}
+	matchingNamespaceNames := sets.NewString(matchingNamespaceNamesList...)
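+	// From here on: drop totals for namespaces that no longer match the
+	// selector, skip namespaces whose recorded usage is already current, and
+	// recalculate the rest, collecting failures as retry items.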
+
+	reconciliationErrors := []error{}
+	retryItems := []workItem{}
+	for _, item := range workItems {
+		namespaceName := item.namespaceName
+		namespaceTotals, namespaceLoaded := quotautil.GetResourceQuotasStatusByNamespace(quota.Status.Namespaces, namespaceName)
+		if !matchingNamespaceNames.Has(namespaceName) {
+			if namespaceLoaded {
+				// remove this item from all totals
+				quota.Status.Total.Used = utilquota.Subtract(quota.Status.Total.Used, namespaceTotals.Used)
+				quotautil.RemoveResourceQuotasStatusByNamespace(&quota.Status.Namespaces, namespaceName)
+			}
+			continue
+		}
+
+		// if there's no work for us to do, do nothing
+		if !item.forceRecalculation && namespaceLoaded && equality.Semantic.DeepEqual(namespaceTotals.Hard, quota.Spec.Quota.Hard) {
+			continue
+		}
+
+		actualUsage, err := quotaUsageCalculationFunc(namespaceName, quota.Spec.Quota.Scopes, quota.Spec.Quota.Hard, c.registry, quota.Spec.Quota.ScopeSelector)
+		if err != nil {
+			// tally up errors, but calculate everything you can
+			reconciliationErrors = append(reconciliationErrors, err)
+			retryItems = append(retryItems, item)
+			continue
+		}
+		recalculatedStatus := corev1.ResourceQuotaStatus{
+			Used: actualUsage,
+			Hard: quota.Spec.Quota.Hard,
+		}
+
+		// subtract old usage, add new usage
+		quota.Status.Total.Used = utilquota.Subtract(quota.Status.Total.Used, namespaceTotals.Used)
+		quota.Status.Total.Used = utilquota.Add(quota.Status.Total.Used, recalculatedStatus.Used)
+		quotautil.InsertResourceQuotasStatus(&quota.Status.Namespaces, quotav1.ResourceQuotaStatusByNamespace{
+			Namespace: namespaceName,
+			Status:    recalculatedStatus,
+		})
+	}
+
+	// Remove any namespaces from quota.status that no longer match.
+	// Needed because we will never get work items for namespaces that no longer exist if we missed the delete event (e.g. on startup)
+	// range on a copy so that we don't mutate our original
+	statusCopy := quota.Status.Namespaces.DeepCopy()
+	for _, namespaceTotals := range statusCopy {
+		namespaceName := namespaceTotals.Namespace
+		if !matchingNamespaceNames.Has(namespaceName) {
+			quota.Status.Total.Used = utilquota.Subtract(quota.Status.Total.Used, namespaceTotals.Status.Used)
+			quotautil.RemoveResourceQuotasStatusByNamespace(&quota.Status.Namespaces, namespaceName)
+		}
+	}
+
+	quota.Status.Total.Hard = quota.Spec.Quota.Hard
+
+	// if there's no change, no update, return early. NewAggregate returns nil on empty input
+	if equality.Semantic.DeepEqual(quota, originalQuota) {
+		return kutilerrors.NewAggregate(reconciliationErrors), retryItems
+	}
+
+	if _, err := c.clusterQuotaClient.UpdateStatus(context.TODO(), quota, metav1.UpdateOptions{}); err != nil {
+		return kutilerrors.NewAggregate(append(reconciliationErrors, err)), workItems
+	}
+
+	return kutilerrors.NewAggregate(reconciliationErrors), retryItems
+}
+
+// replenishQuota is a replenishment function invoked by a controller to notify that a quota should be recalculated
+func (c *ClusterQuotaReconcilationController) replenishQuota(groupResource schema.GroupResource, namespace string) {
+	// check if the quota controller can evaluate this kind, if not, ignore it altogether...
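+	// (only GroupResources with a registered evaluator can contribute to
+	// cluster quota usage, so anything else never needs replenishment)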
+	relevantEvaluators := []utilquota.Evaluator{}
+	evaluators := c.registry.List()
+	for i := range evaluators {
+		evaluator := evaluators[i]
+		if evaluator.GroupResource() == groupResource {
+			relevantEvaluators = append(relevantEvaluators, evaluator)
+		}
+	}
+	if len(relevantEvaluators) == 0 {
+		return
+	}
+
+	quotaNames, _ := c.clusterQuotaMapper.GetClusterQuotasFor(namespace)
+
+	// only queue those quotas that are tracking a resource associated with this kind.
+	for _, quotaName := range quotaNames {
+		quota, err := c.clusterQuotaLister.Get(quotaName)
+		if err != nil {
+			// replenishment will be delayed, but we'll get back around to it later if it matters
+			continue
+		}
+
+		resourceQuotaResources := utilquota.ResourceNames(quota.Status.Total.Hard)
+		for _, evaluator := range relevantEvaluators {
+			matchedResources := evaluator.MatchingResources(resourceQuotaResources)
+			if len(matchedResources) > 0 {
+				// TODO: make this support targeted replenishment to a specific kind, right now it does a full recalc on that quota.
+				c.forceCalculation(quotaName, namespace)
+				break
+			}
+		}
+	}
+}
+
+func (c *ClusterQuotaReconcilationController) addClusterQuota(cur interface{}) {
+	c.enqueueClusterQuota(cur)
+}
+func (c *ClusterQuotaReconcilationController) updateClusterQuota(old, cur interface{}) {
+	c.enqueueClusterQuota(cur)
+}
+func (c *ClusterQuotaReconcilationController) enqueueClusterQuota(obj interface{}) {
+	quota, ok := obj.(*quotav1.ClusterResourceQuota)
+	if !ok {
+		utilruntime.HandleError(fmt.Errorf("not a ClusterResourceQuota %v", obj))
+		return
+	}
+
+	namespaces, _ := c.clusterQuotaMapper.GetNamespacesFor(quota.Name)
+	c.calculate(quota.Name, namespaces...)
+}
+
+func (c *ClusterQuotaReconcilationController) AddMapping(quotaName, namespaceName string) {
+	c.calculate(quotaName, namespaceName)
+}
+func (c *ClusterQuotaReconcilationController) RemoveMapping(quotaName, namespaceName string) {
+	c.calculate(quotaName, namespaceName)
+}
+
+// quotaUsageCalculationFunc is a function to calculate quota usage. It is only configurable for easy unit testing.
+// NEVER CHANGE THIS OUTSIDE A TEST
+var quotaUsageCalculationFunc = utilquota.CalculateUsage
diff --git a/vendor/github.com/openshift/cluster-policy-controller/pkg/quota/clusterquotareconciliation/workqueuebucket.go b/vendor/github.com/openshift/cluster-policy-controller/pkg/quota/clusterquotareconciliation/workqueuebucket.go
new file mode 100644
index 0000000000..a984222087
--- /dev/null
+++ b/vendor/github.com/openshift/cluster-policy-controller/pkg/quota/clusterquotareconciliation/workqueuebucket.go
@@ -0,0 +1,114 @@
+package clusterquotareconciliation
+
+import (
+	"sync"
+
+	"k8s.io/client-go/util/workqueue"
+)
+
+// BucketingWorkQueue gives a way to add items related to a single entry in a work queue.
+// This allows you to work on a set of related work in a single UOW-style way.
+type BucketingWorkQueue interface {
+	AddWithData(key interface{}, data ...interface{})
+	AddWithDataRateLimited(key interface{}, data ...interface{})
+	GetWithData() (key interface{}, data []interface{}, quit bool)
+	Done(key interface{})
+	Forget(key interface{})
+
+	ShutDown()
+}
+
+func NewBucketingWorkQueue(name string) BucketingWorkQueue {
+	return &workQueueBucket{
+		queue:      workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), name),
+		work:       map[interface{}][]interface{}{},
+		dirtyWork:  map[interface{}][]interface{}{},
+		inProgress: map[interface{}]bool{},
+	}
+}
+
+type workQueueBucket struct {
+	// TODO these are used together to bucket items by namespace and then batch them up for processing.
+	// The technique is valuable for rollup activities to avoid fanout and reduce resource contention.
+	// We could move this into a library if another component needed it.
+	// queue is indexed by namespace, so that we bundle up on a per-namespace basis
+	queue      workqueue.RateLimitingInterface
+	workLock   sync.Mutex
+	work       map[interface{}][]interface{}
+	dirtyWork  map[interface{}][]interface{}
+	inProgress map[interface{}]bool
+}
+
+func (e *workQueueBucket) AddWithData(key interface{}, data ...interface{}) {
+	e.workLock.Lock()
+	defer e.workLock.Unlock()
+
+	// this Add can trigger a Get BEFORE the work is added to a list, but this is ok because the getWork routine
+	// waits on the workLock before retrieving the work to do, so the writes in this method will be observed
+	e.queue.Add(key)
+
+	if e.inProgress[key] {
+		e.dirtyWork[key] = append(e.dirtyWork[key], data...)
+		return
+	}
+
+	e.work[key] = append(e.work[key], data...)
+}
+
+func (e *workQueueBucket) AddWithDataRateLimited(key interface{}, data ...interface{}) {
+	e.workLock.Lock()
+	defer e.workLock.Unlock()
+
+	// this Add can trigger a Get BEFORE the work is added to a list, but this is ok because the getWork routine
+	// waits on the workLock before retrieving the work to do, so the writes in this method will be observed
+	e.queue.AddRateLimited(key)
+
+	if e.inProgress[key] {
+		e.dirtyWork[key] = append(e.dirtyWork[key], data...)
+		return
+	}
+
+	e.work[key] = append(e.work[key], data...)
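+	// (as in AddWithData above: anything queued while this key was checked out
+	// went into dirtyWork and is swapped back into work by Done)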
+}
+
+func (e *workQueueBucket) Done(key interface{}) {
+	e.workLock.Lock()
+	defer e.workLock.Unlock()
+
+	e.queue.Done(key)
+	e.work[key] = e.dirtyWork[key]
+	delete(e.dirtyWork, key)
+	delete(e.inProgress, key)
+}
+
+func (e *workQueueBucket) Forget(key interface{}) {
+	e.queue.Forget(key)
+}
+
+func (e *workQueueBucket) GetWithData() (interface{}, []interface{}, bool) {
+	key, shutdown := e.queue.Get()
+	if shutdown {
+		return nil, []interface{}{}, shutdown
+	}
+
+	e.workLock.Lock()
+	defer e.workLock.Unlock()
+	// at this point, we know we have a coherent view of e.work. It is entirely possible
+	// that our workqueue has another item requeued to it, but we'll pick it up early. This is ok
+	// because the next time around that work will go into our dirty list
+
+	work := e.work[key]
+	delete(e.work, key)
+	delete(e.dirtyWork, key)
+	e.inProgress[key] = true
+
+	if len(work) != 0 {
+		return key, work, false
+	}
+
+	return key, []interface{}{}, false
+}
+
+func (e *workQueueBucket) ShutDown() {
+	e.queue.ShutDown()
+}
diff --git a/vendor/github.com/openshift/cluster-policy-controller/pkg/quota/quotaimageexternal/imagestreamimport_evaluator.go b/vendor/github.com/openshift/cluster-policy-controller/pkg/quota/quotaimageexternal/imagestreamimport_evaluator.go
new file mode 100644
index 0000000000..413e9e9190
--- /dev/null
+++ b/vendor/github.com/openshift/cluster-policy-controller/pkg/quota/quotaimageexternal/imagestreamimport_evaluator.go
@@ -0,0 +1,99 @@
+package quotaimageexternal
+
+import (
+	"fmt"
+
+	corev1 "k8s.io/api/core/v1"
+	kerrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/resource"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	kadmission "k8s.io/apiserver/pkg/admission"
+	kquota "k8s.io/apiserver/pkg/quota/v1"
+	"k8s.io/apiserver/pkg/quota/v1/generic"
+
+	imagev1 "github.com/openshift/api/image/v1"
+	imagev1lister "github.com/openshift/client-go/image/listers/image/v1"
+)
+
+var imageStreamImportResources = []corev1.ResourceName{
+	imagev1.ResourceImageStreams,
+}
+
+type imageStreamImportEvaluator struct {
+	store imagev1lister.ImageStreamLister
+}
+
+// NewImageStreamImportEvaluator computes resource usage for ImageStreamImport objects. This particular kind
+// is a virtual resource. It depends on the ImageStream usage evaluator to compute image numbers before
+// admission can work.
+// If you make a change here, be sure to make a corresponding change in the apiserver
+func NewImageStreamImportEvaluator(store imagev1lister.ImageStreamLister) kquota.Evaluator {
+	return &imageStreamImportEvaluator{
+		store: store,
+	}
+}
+
+// Constraints checks that the given object is an image stream import.
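+// (Only the object's type is validated; the required resource names are not
+// inspected here.)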
+func (i *imageStreamImportEvaluator) Constraints(required []corev1.ResourceName, object runtime.Object) error { + _, okExt := object.(*imagev1.ImageStreamImport) + if !okExt { + return fmt.Errorf("unexpected input object %v", object) + } + return nil +} + +func (i *imageStreamImportEvaluator) GroupResource() schema.GroupResource { + return schema.GroupResource{Group: "image.openshift.io", Resource: "imagestreamimports"} +} + +func (i *imageStreamImportEvaluator) Handles(a kadmission.Attributes) bool { + return a.GetOperation() == kadmission.Create +} + +func (i *imageStreamImportEvaluator) Matches(resourceQuota *corev1.ResourceQuota, item runtime.Object) (bool, error) { + matchesScopeFunc := func(corev1.ScopedResourceSelectorRequirement, runtime.Object) (bool, error) { return true, nil } + return generic.Matches(resourceQuota, item, i.MatchingResources, matchesScopeFunc) +} + +func (p *imageStreamImportEvaluator) MatchingScopes(item runtime.Object, scopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) { + return []corev1.ScopedResourceSelectorRequirement{}, nil +} + +func (p *imageStreamImportEvaluator) UncoveredQuotaScopes(limitedScopes []corev1.ScopedResourceSelectorRequirement, matchedQuotaScopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) { + return []corev1.ScopedResourceSelectorRequirement{}, nil +} + +func (i *imageStreamImportEvaluator) MatchingResources(input []corev1.ResourceName) []corev1.ResourceName { + return kquota.Intersection(input, imageStreamImportResources) +} + +func (i *imageStreamImportEvaluator) Usage(item runtime.Object) (corev1.ResourceList, error) { + isi, ok := item.(*imagev1.ImageStreamImport) + if !ok { + return corev1.ResourceList{}, fmt.Errorf("item is not an ImageStreamImport: %T", item) + } + + usage := map[corev1.ResourceName]resource.Quantity{ + imagev1.ResourceImageStreams: *resource.NewQuantity(0, resource.DecimalSI), + } + + if !isi.Spec.Import || (len(isi.Spec.Images) == 0 && isi.Spec.Repository == nil) { + return usage, nil + } + + is, err := i.store.ImageStreams(isi.Namespace).Get(isi.Name) + if err != nil && !kerrors.IsNotFound(err) { + utilruntime.HandleError(fmt.Errorf("failed to list image streams: %v", err)) + } + if is == nil || kerrors.IsNotFound(err) { + usage[imagev1.ResourceImageStreams] = *resource.NewQuantity(1, resource.DecimalSI) + } + + return usage, nil +} + +func (i *imageStreamImportEvaluator) UsageStats(options kquota.UsageStatsOptions) (kquota.UsageStats, error) { + return kquota.UsageStats{}, nil +} diff --git a/vendor/github.com/openshift/cluster-policy-controller/pkg/quota/quotaimageexternal/imagestreamtag_evaluator.go b/vendor/github.com/openshift/cluster-policy-controller/pkg/quota/quotaimageexternal/imagestreamtag_evaluator.go new file mode 100644 index 0000000000..381d4b8902 --- /dev/null +++ b/vendor/github.com/openshift/cluster-policy-controller/pkg/quota/quotaimageexternal/imagestreamtag_evaluator.go @@ -0,0 +1,104 @@ +package quotaimageexternal + +import ( + "fmt" + + corev1 "k8s.io/api/core/v1" + kerrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + kadmission "k8s.io/apiserver/pkg/admission" + kquota "k8s.io/apiserver/pkg/quota/v1" + "k8s.io/apiserver/pkg/quota/v1/generic" + + imagev1 "github.com/openshift/api/image/v1" + imagev1typedclient 
"github.com/openshift/client-go/image/clientset/versioned/typed/image/v1" + imagev1lister "github.com/openshift/client-go/image/listers/image/v1" + "github.com/openshift/library-go/pkg/image/imageutil" +) + +var imageStreamTagResources = []corev1.ResourceName{ + imagev1.ResourceImageStreams, +} + +type imageStreamTagEvaluator struct { + store imagev1lister.ImageStreamLister + istGetter imagev1typedclient.ImageStreamTagsGetter +} + +// NewImageStreamTagEvaluator computes resource usage of ImageStreamsTags. Its sole purpose is to handle +// UPDATE admission operations on imageStreamTags resource. +// If you make a change here, be sure to make a corresponding change in the apiserver +func NewImageStreamTagEvaluator(store imagev1lister.ImageStreamLister, istGetter imagev1typedclient.ImageStreamTagsGetter) kquota.Evaluator { + return &imageStreamTagEvaluator{ + store: store, + istGetter: istGetter, + } +} + +// Constraints checks that given object is an image stream tag +func (i *imageStreamTagEvaluator) Constraints(required []corev1.ResourceName, object runtime.Object) error { + _, okExt := object.(*imagev1.ImageStreamTag) + if !okExt { + return fmt.Errorf("unexpected input object %v", object) + } + return nil +} + +func (i *imageStreamTagEvaluator) GroupResource() schema.GroupResource { + return schema.GroupResource{Group: "image.openshift.io", Resource: "imagestreamtags"} +} + +func (i *imageStreamTagEvaluator) Handles(a kadmission.Attributes) bool { + operation := a.GetOperation() + return operation == kadmission.Create || operation == kadmission.Update +} + +func (i *imageStreamTagEvaluator) Matches(resourceQuota *corev1.ResourceQuota, item runtime.Object) (bool, error) { + matchesScopeFunc := func(corev1.ScopedResourceSelectorRequirement, runtime.Object) (bool, error) { return true, nil } + return generic.Matches(resourceQuota, item, i.MatchingResources, matchesScopeFunc) +} + +func (p *imageStreamTagEvaluator) MatchingScopes(item runtime.Object, scopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) { + return []corev1.ScopedResourceSelectorRequirement{}, nil +} + +func (p *imageStreamTagEvaluator) UncoveredQuotaScopes(limitedScopes []corev1.ScopedResourceSelectorRequirement, matchedQuotaScopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) { + return []corev1.ScopedResourceSelectorRequirement{}, nil +} + +func (i *imageStreamTagEvaluator) MatchingResources(input []corev1.ResourceName) []corev1.ResourceName { + return kquota.Intersection(input, imageStreamTagResources) +} + +func (i *imageStreamTagEvaluator) Usage(item runtime.Object) (corev1.ResourceList, error) { + ist, ok := item.(*imagev1.ImageStreamTag) + if !ok { + return corev1.ResourceList{}, nil + } + + res := map[corev1.ResourceName]resource.Quantity{ + imagev1.ResourceImageStreams: *resource.NewQuantity(0, resource.BinarySI), + } + + isName, _, err := imageutil.ParseImageStreamTagName(ist.Name) + if err != nil { + return corev1.ResourceList{}, err + } + + is, err := i.store.ImageStreams(ist.Namespace).Get(isName) + if err != nil && !kerrors.IsNotFound(err) { + utilruntime.HandleError(fmt.Errorf("failed to get image stream %s/%s: %v", ist.Namespace, isName, err)) + } + if is == nil || kerrors.IsNotFound(err) { + res[imagev1.ResourceImageStreams] = *resource.NewQuantity(1, resource.BinarySI) + } + + return res, nil +} + +func (i *imageStreamTagEvaluator) UsageStats(options kquota.UsageStatsOptions) (kquota.UsageStats, error) { + 
return kquota.UsageStats{}, nil +} diff --git a/vendor/github.com/openshift/cluster-policy-controller/pkg/quota/quotaimageexternal/registry.go b/vendor/github.com/openshift/cluster-policy-controller/pkg/quota/quotaimageexternal/registry.go new file mode 100644 index 0000000000..244a4e201a --- /dev/null +++ b/vendor/github.com/openshift/cluster-policy-controller/pkg/quota/quotaimageexternal/registry.go @@ -0,0 +1,33 @@ +// Package image implements evaluators of usage for imagestreams and images. They are supposed +// to be passed to resource quota controller and origin resource quota admission plugin. +package quotaimageexternal + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + kquota "k8s.io/apiserver/pkg/quota/v1" + "k8s.io/apiserver/pkg/quota/v1/generic" + + imagev1 "github.com/openshift/api/image/v1" + imagev1typedclient "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1" + imagev1informer "github.com/openshift/client-go/image/informers/externalversions/image/v1" +) + +var legacyObjectCountAliases = map[schema.GroupVersionResource]corev1.ResourceName{ + imagev1.GroupVersion.WithResource("imagestreams"): imagev1.ResourceImageStreams, +} + +// NewEvaluators returns the list of static evaluators that manage more than counts +func NewReplenishmentEvaluators(f kquota.ListerForResourceFunc, isInformer imagev1informer.ImageStreamInformer, imageClient imagev1typedclient.ImageStreamTagsGetter) []kquota.Evaluator { + // these evaluators have special logic + result := []kquota.Evaluator{ + NewImageStreamTagEvaluator(isInformer.Lister(), imageClient), + NewImageStreamImportEvaluator(isInformer.Lister()), + } + // these evaluators require an alias for backwards compatibility + for gvr, alias := range legacyObjectCountAliases { + result = append(result, + generic.NewObjectCountEvaluator(gvr.GroupResource(), generic.ListResourceUsingListerFunc(f, gvr), alias)) + } + return result +} diff --git a/vendor/github.com/openshift/cluster-policy-controller/pkg/security/controller/namespace_scc_allocation_controller.go b/vendor/github.com/openshift/cluster-policy-controller/pkg/security/controller/namespace_scc_allocation_controller.go new file mode 100644 index 0000000000..d7676e715f --- /dev/null +++ b/vendor/github.com/openshift/cluster-policy-controller/pkg/security/controller/namespace_scc_allocation_controller.go @@ -0,0 +1,351 @@ +package controller + +import ( + "context" + "errors" + "fmt" + "github.com/openshift/library-go/pkg/operator/events" + "math/big" + "reflect" + "time" + + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + runtimejson "k8s.io/apimachinery/pkg/runtime/serializer/json" + "k8s.io/apimachinery/pkg/types" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/apimachinery/pkg/util/wait" + corev1informers "k8s.io/client-go/informers/core/v1" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + corev1listers "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/klog/v2" + coreapi "k8s.io/kubernetes/pkg/apis/core" + + securityv1 "github.com/openshift/api/security/v1" + securityinternalv1 "github.com/openshift/api/securityinternal/v1" + securityv1client "github.com/openshift/client-go/securityinternal/clientset/versioned/typed/securityinternal/v1" + 
"github.com/openshift/cluster-policy-controller/pkg/security/mcs" + "github.com/openshift/cluster-policy-controller/pkg/security/uidallocator" + "github.com/openshift/library-go/pkg/controller/factory" + "github.com/openshift/library-go/pkg/security/uid" +) + +const ( + controllerName = "namespace-security-allocation-controller" + rangeName = "scc-uid" + initialRepairKey = "__internal/initialRepair" + + // For this controller to work correctly we need to ensure a periodic repair + // of all the range allocations. For that we are using a key "key" + // which will trigger that every 8 hours. + resyncPeriod = "@every 8h" +) + +// NamespaceSCCAllocationController allocates uids/labels for namespaces +type NamespaceSCCAllocationController struct { + requiredUIDRange *uid.Range + mcsAllocator MCSAllocationFunc + nsLister corev1listers.NamespaceLister + currentUIDRangeAllocation *securityinternalv1.RangeAllocation + + namespaceClient corev1client.NamespaceInterface + rangeAllocationClient securityv1client.RangeAllocationsGetter + + encoder runtime.Encoder +} + +func NewNamespaceSCCAllocationController(namespaceInformer corev1informers.NamespaceInformer, client corev1client.NamespaceInterface, rangeAllocationClient securityv1client.RangeAllocationsGetter, requiredUIDRange *uid.Range, mcs MCSAllocationFunc, eventRecorder events.Recorder) factory.Controller { + scheme := runtime.NewScheme() + utilruntime.Must(corev1.AddToScheme(scheme)) + codecs := serializer.NewCodecFactory(scheme) + jsonSerializer := runtimejson.NewSerializer(runtimejson.DefaultMetaFactory, scheme, scheme, false) + encoder := codecs.WithoutConversion().EncoderForVersion(jsonSerializer, corev1.SchemeGroupVersion) + + c := &NamespaceSCCAllocationController{ + requiredUIDRange: requiredUIDRange, + mcsAllocator: mcs, + namespaceClient: client, + rangeAllocationClient: rangeAllocationClient, + nsLister: namespaceInformer.Lister(), + encoder: encoder, + } + + eventRecorderWithSuffix := eventRecorder.WithComponentSuffix(controllerName) + syncContext := factory.NewSyncContext(controllerName, eventRecorder) + syncContext.Queue().Add(initialRepairKey) + + enqueueNamespace := func(obj interface{}) { + ns, ok := obj.(*corev1.Namespace) + if !ok { + return + } + syncContext.Queue().Add(asNamespaceNameKey(ns.Name)) + } + + namespaceInformer.Informer().AddEventHandlerWithResyncPeriod( + cache.ResourceEventHandlerFuncs{ + AddFunc: enqueueNamespace, + UpdateFunc: func(oldObj, newObj interface{}) { + enqueueNamespace(newObj) + }, + }, + 10*time.Minute, + ) + + return factory.New().ResyncSchedule(resyncPeriod).WithBareInformers(namespaceInformer.Informer()).WithSyncContext(syncContext).WithSync(c.sync). 
+ ToController(controllerName, eventRecorderWithSuffix) +} + +// sync handles initial and periodic repair and namespace creation/update +func (c *NamespaceSCCAllocationController) sync(ctx context.Context, syncCtx factory.SyncContext) error { + key := syncCtx.QueueKey() + + switch key { + case initialRepairKey: + klog.V(1).Infof("Repairing SCC UID Allocations") + if err := c.WaitForRepair(ctx, syncCtx); err != nil { + // this is consistent with previous behavior + klog.Fatal(err) + } + klog.V(1).Infof("Repair complete") + case factory.DefaultQueueKey: // periodic repair + if err := c.Repair(ctx, syncCtx); err != nil { + return fmt.Errorf("error during periodic repair: %v", err) + } + default: + namespaceName, err := parseNamespaceNameKey(key) + if err != nil { + return err + } + return c.syncNamespace(ctx, syncCtx, namespaceName) + } + return nil +} + +// syncNamespace will sync the namespace with the given key. +// This function is not meant to be invoked concurrently with the same key. +func (c *NamespaceSCCAllocationController) syncNamespace(ctx context.Context, syncCtx factory.SyncContext, namespaceName string) error { + ns, err := c.nsLister.Get(namespaceName) + if apierrors.IsNotFound(err) { + return nil + } + if err != nil { + return err + } + if _, ok := ns.Annotations[securityv1.UIDRangeAnnotation]; ok { + return nil + } + + return c.allocate(ctx, syncCtx, ns) +} + +func (c *NamespaceSCCAllocationController) allocate(ctx context.Context, syncCtx factory.SyncContext, ns *corev1.Namespace) error { + // unless we affirmatively succeed, clear the local state to try again + success := false + defer func() { + if success { + return + } + c.currentUIDRangeAllocation = nil + }() + + // if we don't have the current state, go get it + if c.currentUIDRangeAllocation == nil { + newRange, err := c.rangeAllocationClient.RangeAllocations().Get(context.TODO(), rangeName, metav1.GetOptions{}) + if err != nil { + return err + } + c.currentUIDRangeAllocation = newRange + } + + // do uid allocation. We reserve the UID we want first, lock it in etcd, then update the namespace. 
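+	// (illustration, assuming the usual <start>-<end>/<size> range format:
+	// with "1000000000-1999999999/10000" each bit in the bitmap stands for one
+	// 10000-UID block, and bit 0 is the block starting at 1000000000)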
+	// We allocate by reading in a giant big.Int bitmap (one bit per offset location),
+	// finding the next free bit, then calculating the offset location.
+	uidRange, err := uid.ParseRange(c.currentUIDRangeAllocation.Range)
+	if err != nil {
+		return err
+	}
+	if !reflect.DeepEqual(*uidRange, *c.requiredUIDRange) {
+		return fmt.Errorf("conflicting UID range; expected %#v, got %#v", *c.requiredUIDRange, *uidRange)
+	}
+	allocatedBitMapInt := big.NewInt(0).SetBytes(c.currentUIDRangeAllocation.Data)
+	bitIndex, found := allocateNextContiguousBit(allocatedBitMapInt, int(uidRange.Size()))
+	if !found {
+		return fmt.Errorf("uid range exceeded")
+	}
+	allocatedBitMapInt = allocatedBitMapInt.SetBit(allocatedBitMapInt, bitIndex, 1)
+	newRangeAllocation := c.currentUIDRangeAllocation.DeepCopy()
+	newRangeAllocation.Data = allocatedBitMapInt.Bytes()
+
+	actualRangeAllocation, err := c.rangeAllocationClient.RangeAllocations().Update(context.TODO(), newRangeAllocation, metav1.UpdateOptions{})
+	if err != nil {
+		return err
+	}
+	c.currentUIDRangeAllocation = actualRangeAllocation
+
+	block, ok := uidRange.BlockAt(uint32(bitIndex))
+	if !ok {
+		return fmt.Errorf("%d not in range", bitIndex)
+	}
+
+	// Now modify the namespace
+	nsCopy := ns.DeepCopy()
+	if nsCopy.Annotations == nil {
+		nsCopy.Annotations = make(map[string]string)
+	}
+	nsCopy.Annotations[securityv1.UIDRangeAnnotation] = block.String()
+	nsCopy.Annotations[securityv1.SupplementalGroupsAnnotation] = block.String()
+	if _, ok := nsCopy.Annotations[securityv1.MCSAnnotation]; !ok {
+		if label := c.mcsAllocator(block); label != nil {
+			nsCopy.Annotations[securityv1.MCSAnnotation] = label.String()
+		}
+	}
+	nsCopyBytes, err := runtime.Encode(c.encoder, nsCopy)
+	if err != nil {
+		return err
+	}
+	nsBytes, err := runtime.Encode(c.encoder, ns)
+	if err != nil {
+		return err
+	}
+	patchBytes, err := strategicpatch.CreateTwoWayMergePatch(nsBytes, nsCopyBytes, &corev1.Namespace{})
+	if err != nil {
+		return err
+	}
+	// use a patch here so we don't conflict with other actors
+	_, err = c.namespaceClient.Patch(context.TODO(), ns.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
+	if apierrors.IsNotFound(err) {
+		return nil
+	}
+	if err != nil {
+		return err
+	}
+	// emit event once per namespace. There aren't many of these, but it will let us know how long it takes from namespace creation
+	// until the SCC ranges are created. There is a suspicion that this takes a while.
+ syncCtx.Recorder().Eventf("CreatedSCCRanges", "created SCC ranges for %v namespace", ns.Name) + + success = true + return nil +} + +// allocateNextContiguousBit finds a free bit in the int and returns which one it is and whether it succeeded +func allocateNextContiguousBit(allocated *big.Int, max int) (int, bool) { + for i := 0; i < max; i++ { + if allocated.Bit(i) == 0 { + return i, true + } + } + return 0, false +} + +func (c *NamespaceSCCAllocationController) WaitForRepair(ctx context.Context, syncCtx factory.SyncContext) error { + return wait.PollImmediate(10*time.Second, 5*time.Minute, func() (bool, error) { + select { + case <-ctx.Done(): + return true, nil + default: + } + err := c.Repair(ctx, syncCtx) + if err == nil { + return true, nil + } + utilruntime.HandleError(err) + return false, nil + }) +} + +func (c *NamespaceSCCAllocationController) Repair(ctx context.Context, syncCtx factory.SyncContext) error { + // TODO: (per smarterclayton) if Get() or List() is a weak consistency read, + // or if they are executed against different leaders, + // the ordering guarantee required to ensure no item is allocated twice is violated. + // List must return a ResourceVersion higher than the etcd index Get, + // and the release code must not release items that have allocated but not yet been created + // See https://github.com/kubernetes/kubernetes/issues/8295 + + // get the curr so we have a resourceVersion to pin to + uidRange, err := c.rangeAllocationClient.RangeAllocations().Get(context.TODO(), rangeName, metav1.GetOptions{}) + needCreate := apierrors.IsNotFound(err) + if err != nil && !needCreate { + return err + } + if needCreate { + uidRange = &securityinternalv1.RangeAllocation{ObjectMeta: metav1.ObjectMeta{Name: rangeName}} + } + + uids, err := uidallocator.NewInMemory(c.requiredUIDRange) + if err != nil { + return err + } + nsList, err := c.nsLister.List(labels.Everything()) + if err != nil { + return err + } + for _, ns := range nsList { + value, ok := ns.Annotations[securityv1.UIDRangeAnnotation] + if !ok { + continue + } + block, err := uid.ParseBlock(value) + if err != nil { + continue + } + + switch err := uids.Allocate(block); err { + case nil: + case uidallocator.ErrNotInRange, uidallocator.ErrAllocated: + continue + case uidallocator.ErrFull: + msg := fmt.Sprintf("the UID range %s is full; you must widen the range in order to allocate more UIDs", c.requiredUIDRange) + syncCtx.Recorder().Warning("UIDRangeFull", msg) + return errors.New(msg) + default: + return fmt.Errorf("unable to allocate UID block %s for namespace %s due to an unknown error, exiting: %v", block, ns.Name, err) + } + } + + newRangeAllocation := &coreapi.RangeAllocation{} + if err := uids.Snapshot(newRangeAllocation); err != nil { + return err + } + uidRange.Range = newRangeAllocation.Range + uidRange.Data = newRangeAllocation.Data + + if needCreate { + if _, err := c.rangeAllocationClient.RangeAllocations().Create(context.TODO(), uidRange, metav1.CreateOptions{}); err != nil { + return err + } + return nil + } + + if _, err := c.rangeAllocationClient.RangeAllocations().Update(context.TODO(), uidRange, metav1.UpdateOptions{}); err != nil { + return err + } + + return nil +} + +type MCSAllocationFunc func(uid.Block) *mcs.Label + +// DefaultMCSAllocation returns a label from the MCS range that matches the offset +// within the overall range. 
blockSize must be a positive integer representing the +// number of labels to jump past in the category space (if 1, range == label, if 2 +// each range will have two labels). +func DefaultMCSAllocation(from *uid.Range, to *mcs.Range, blockSize int) MCSAllocationFunc { + return func(block uid.Block) *mcs.Label { + ok, offset := from.Offset(block) + if !ok { + return nil + } + if blockSize > 0 { + offset = offset * uint32(blockSize) + } + label, _ := to.LabelAt(uint64(offset)) + return label + } +} diff --git a/vendor/github.com/openshift/cluster-policy-controller/pkg/security/controller/utils.go b/vendor/github.com/openshift/cluster-policy-controller/pkg/security/controller/utils.go new file mode 100644 index 0000000000..91640ba76d --- /dev/null +++ b/vendor/github.com/openshift/cluster-policy-controller/pkg/security/controller/utils.go @@ -0,0 +1,23 @@ +package controller + +import ( + "fmt" + "strings" +) + +const ( + namespaceNameKeyPrefix = "ns" +) + +func asNamespaceNameKey(namespace string) (namespaceNameKey string) { + return namespaceNameKeyPrefix + "/" + namespace +} + +func parseNamespaceNameKey(key string) (namespace string, err error) { + parts := strings.Split(key, "/") + if len(parts) != 2 || parts[0] != namespaceNameKeyPrefix || parts[1] == "" { + return "", fmt.Errorf("unexpected namespace name key format: %q", key) + } + + return parts[1], nil +} diff --git a/vendor/github.com/openshift/cluster-policy-controller/pkg/security/mcs/label.go b/vendor/github.com/openshift/cluster-policy-controller/pkg/security/mcs/label.go new file mode 100644 index 0000000000..1893841887 --- /dev/null +++ b/vendor/github.com/openshift/cluster-policy-controller/pkg/security/mcs/label.go @@ -0,0 +1,238 @@ +package mcs + +import ( + "bytes" + "fmt" + "sort" + "strconv" + "strings" +) + +const maxCategories = 1024 + +type Label struct { + Prefix string + Categories +} + +// NewLabel creates a Label object based on the offset given by +// offset with a number of labels equal to k. Prefix may be any +// valid SELinux label (user:role:type:level:). +func NewLabel(prefix string, offset uint64, k uint) (*Label, error) { + if len(prefix) > 0 && !(strings.HasSuffix(prefix, ":") || strings.HasSuffix(prefix, ",")) { + prefix = prefix + ":" + } + return &Label{ + Prefix: prefix, + Categories: categoriesForOffset(offset, maxCategories, k), + }, nil +} + +// ParseLabel converts a string value representing an SELinux label +// into a Label object, extracting and ordering categories. 
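+// For example (illustrative input), "s0:c0,c1" and "s0:c1,c0" parse to the
+// same Label: prefix "s0:" with categories stored highest-first (see
+// Categories.Less below).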
+func ParseLabel(in string) (*Label, error) { + if len(in) == 0 { + return &Label{}, nil + } + + prefix := strings.Split(in, ":") + segment := prefix[len(prefix)-1] + if len(prefix) > 0 { + prefix = prefix[:len(prefix)-1] + } + prefixString := strings.Join(prefix, ":") + if len(prefixString) > 0 { + prefixString += ":" + } + + var categories Categories + for _, s := range strings.Split(segment, ",") { + if !strings.HasPrefix(s, "c") { + return nil, fmt.Errorf("categories must start with 'c': %s", segment) + } + i, err := strconv.Atoi(s[1:]) + if err != nil { + return nil, err + } + categories = append(categories, uint16(i)) + } + sort.Sort(categories) + + last := -1 + for _, c := range categories { + if int(c) == last { + return nil, fmt.Errorf("labels may not contain duplicate categories: %s", in) + } + last = int(c) + } + + return &Label{ + Prefix: prefixString, + Categories: categories, + }, nil +} + +func (labels *Label) String() string { + buf := bytes.Buffer{} + buf.WriteString(labels.Prefix) + for i, label := range labels.Categories { + if i != 0 { + buf.WriteRune(',') + } + buf.WriteRune('c') + buf.WriteString(strconv.Itoa(int(label))) + } + return buf.String() +} + +// Offset returns the rank of the provided categories in the +// co-lex rank operation (k is implicit) +func (categories Categories) Offset() uint64 { + k := len(categories) + r := uint64(0) + for i := 0; i < k; i++ { + r += binomial(uint(categories[i]), uint(k-i)) + } + return r +} + +// categoriesForOffset calculates the co-lex unrank operation +// on the combinatorial group defined by n, k, where rank is +// the offset. n is typically 1024 (the SELinux max) +func categoriesForOffset(offset uint64, n, k uint) Categories { + var categories Categories + for i := uint(0); i < k; i++ { + current := binomial(n, k-i) + for current > offset { + n-- + current = binomial(n, k-i) + } + categories = append(categories, uint16(n)) + offset = offset - current + } + sort.Sort(categories) + return categories +} + +type Categories []uint16 + +func (c Categories) Len() int { return len(c) } +func (c Categories) Swap(i, j int) { c[i], c[j] = c[j], c[i] } +func (c Categories) Less(i, j int) bool { + return c[i] > c[j] +} + +func binomial(n, k uint) uint64 { + if n < k { + return 0 + } + if k == n { + return 1 + } + r := uint64(1) + for d := uint(1); d <= k; d++ { + r *= uint64(n) + r /= uint64(d) + n-- + } + return r +} + +type Range struct { + prefix string + n uint + k uint +} + +// NewRange describes an SELinux category range, where prefix may include +// the user, type, role, and level of the range, and n and k represent the +// highest category c0 to c(N-1) and k represents the number of labels to use. +// A range can be used to check whether a given label matches the range. +func NewRange(prefix string, n, k uint) (*Range, error) { + if n == 0 { + return nil, fmt.Errorf("label max value must be a positive integer") + } + if k == 0 { + return nil, fmt.Errorf("label length must be a positive integer") + } + return &Range{ + prefix: prefix, + n: n, + k: k, + }, nil +} + +// ParseRange converts a string value representing an SELinux category +// range into a Range object, extracting the prefix -- which may include the +// user, type, and role of the range, the number of labels to use, and the +// maximum category to use. The input string is expected to be in the format: +// +// /[,] +// +// If the maximum category is not specified, it is defaulted to the maximum +// number of SELinux categories (1024). 
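+// Reconstructing the placeholders from String() below, the expected form is
+// <prefix>/<numLabels>[,<maxCategories>]. As a worked example (hypothetical
+// values): ParseRange("s0/2") yields a Range with prefix "s0", k=2 labels and
+// n=1024 categories, whose Size() is binomial(1024, 2) = 523776 distinct
+// labels; ParseRange("s0/2,512") caps the categories at 512, shrinking
+// Size() to binomial(512, 2) = 130816.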
+func ParseRange(in string) (*Range, error) { + seg := strings.SplitN(in, "/", 2) + if len(seg) != 2 { + return nil, fmt.Errorf("range not in the format \"/[,]\"") + } + prefix := seg[0] + n := maxCategories + size := strings.SplitN(seg[1], ",", 2) + k, err := strconv.Atoi(size[0]) + if err != nil { + return nil, fmt.Errorf("range not in the format \"/[,]\"") + } + if len(size) > 1 { + max, err := strconv.Atoi(size[1]) + if err != nil { + return nil, fmt.Errorf("range not in the format \"/[,]\"") + } + n = max + } + if k > 5 { + return nil, fmt.Errorf("range may not include more than 5 labels") + } + if n > maxCategories { + return nil, fmt.Errorf("range may not include more than %d categories", maxCategories) + } + return NewRange(prefix, uint(n), uint(k)) +} + +func (r *Range) Size() uint64 { + return binomial(r.n, uint(r.k)) +} + +func (r *Range) String() string { + if r.n == maxCategories { + return fmt.Sprintf("%s/%d", r.prefix, r.k) + } + return fmt.Sprintf("%s/%d,%d", r.prefix, r.k, r.n) +} + +func (r *Range) LabelAt(offset uint64) (*Label, bool) { + label, err := NewLabel(r.prefix, offset, r.k) + return label, err == nil +} + +func (r *Range) Contains(label *Label) bool { + if label.Prefix != r.prefix { + return false + } + if len(label.Categories) != int(r.k) { + return false + } + for _, i := range label.Categories { + if i >= uint16(r.n) { + return false + } + } + return true +} + +func (r *Range) Offset(label *Label) (bool, uint64) { + if !r.Contains(label) { + return false, 0 + } + return true, label.Offset() +} diff --git a/vendor/github.com/openshift/cluster-policy-controller/pkg/security/uidallocator/allocator.go b/vendor/github.com/openshift/cluster-policy-controller/pkg/security/uidallocator/allocator.go new file mode 100644 index 0000000000..d66dbf3743 --- /dev/null +++ b/vendor/github.com/openshift/cluster-policy-controller/pkg/security/uidallocator/allocator.go @@ -0,0 +1,151 @@ +package uidallocator + +import ( + "errors" + "fmt" + + api "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/registry/core/service/allocator" + + "github.com/openshift/library-go/pkg/security/uid" +) + +// Interface manages the allocation of ports out of a range. Interface +// should be threadsafe. +type Interface interface { + Allocate(uid.Block) error + AllocateNext() (uid.Block, error) + Release(uid.Block) error +} + +var ( + ErrFull = errors.New("range is full") + ErrNotInRange = errors.New("provided UID range is not in the valid range") + ErrAllocated = errors.New("provided UID range is already allocated") + ErrMismatchedRange = errors.New("the provided UID range does not match the current UID range") +) + +type Allocator struct { + r *uid.Range + alloc allocator.Interface +} + +// Allocator implements Interface and Snapshottable +var _ Interface = &Allocator{} + +// New creates a Allocator over a UID range, calling factory to construct the backing store. +func New(r *uid.Range, factory allocator.AllocatorFactory) (*Allocator, error) { + alloc, err := factory(int(r.Size()), r.String()) + if err != nil { + return nil, err + } + return &Allocator{ + r: r, + alloc: alloc, + }, err +} + +// NewInMemory creates an in-memory Allocator +func NewInMemory(r *uid.Range) (*Allocator, error) { + factory := func(max int, rangeSpec string) (allocator.Interface, error) { + return allocator.NewContiguousAllocationMap(max, rangeSpec), nil + } + return New(r, factory) +} + +// Free returns the count of port left in the range. 
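+// For example (a sketch, assuming uid.ParseRange accepts this form): a fresh
+// in-memory allocator over "1000000000-1999999999/10000" reports Free() ==
+// 100000, and each successful Allocate or AllocateNext decrements that count
+// by one block.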
+func (r *Allocator) Free() int { + return r.alloc.Free() +} + +// Allocate attempts to reserve the provided block. ErrNotInRange or +// ErrAllocated will be returned if the block is not valid for this range +// or has already been reserved. ErrFull will be returned if there +// are no blocks left. +func (r *Allocator) Allocate(block uid.Block) error { + ok, offset := r.contains(block) + if !ok { + return ErrNotInRange + } + + allocated, err := r.alloc.Allocate(int(offset)) + if err != nil { + return err + } + if !allocated { + return ErrAllocated + } + return nil +} + +// AllocateNext reserves one of the ports from the pool. ErrFull may +// be returned if there are no ports left. +func (r *Allocator) AllocateNext() (uid.Block, error) { + offset, ok, err := r.alloc.AllocateNext() + if err != nil { + return uid.Block{}, err + } + if !ok { + return uid.Block{}, ErrFull + } + block, ok := r.r.BlockAt(uint32(offset)) + if !ok { + return uid.Block{}, ErrNotInRange + } + return block, nil +} + +// Release releases the port back to the pool. Releasing an +// unallocated port or a port out of the range is a no-op and +// returns no error. +func (r *Allocator) Release(block uid.Block) error { + ok, offset := r.contains(block) + if !ok { + // TODO: log a warning + return nil + } + + return r.alloc.Release(int(offset)) +} + +// Has returns true if the provided port is already allocated and a call +// to Allocate(block) would fail with ErrAllocated. +func (r *Allocator) Has(block uid.Block) bool { + ok, offset := r.contains(block) + if !ok { + return false + } + + return r.alloc.Has(int(offset)) +} + +// Snapshot saves the current state of the pool. +func (r *Allocator) Snapshot(dst *api.RangeAllocation) error { + snapshottable, ok := r.alloc.(allocator.Snapshottable) + if !ok { + return fmt.Errorf("not a snapshottable allocator") + } + rangeString, data := snapshottable.Snapshot() + dst.Range = rangeString + dst.Data = data + return nil +} + +// Restore restores the pool to the previously captured state. ErrMismatchedNetwork +// is returned if the provided port range doesn't exactly match the previous range. +func (r *Allocator) Restore(into *uid.Range, data []byte) error { + if into.String() != r.r.String() { + return ErrMismatchedRange + } + snapshottable, ok := r.alloc.(allocator.Snapshottable) + if !ok { + return fmt.Errorf("not a snapshottable allocator") + } + return snapshottable.Restore(into.String(), data) +} + +// contains returns true and the offset if the block is in the range (and aligned), and false +// and nil otherwise. +func (r *Allocator) contains(block uid.Block) (bool, uint32) { + return r.r.Offset(block) +} diff --git a/vendor/github.com/openshift/cluster-policy-controller/pkg/version/version.go b/vendor/github.com/openshift/cluster-policy-controller/pkg/version/version.go new file mode 100644 index 0000000000..96d352f0ad --- /dev/null +++ b/vendor/github.com/openshift/cluster-policy-controller/pkg/version/version.go @@ -0,0 +1,73 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package version + +import ( + "fmt" + "runtime" + + "github.com/prometheus/client_golang/prometheus" + + "k8s.io/apimachinery/pkg/version" +) + +var ( + // commitFromGit is a constant representing the source version that + // generated this build. It should be set during build via -ldflags. + commitFromGit string + // versionFromGit is a constant representing the version tag that + // generated this build. It should be set during build via -ldflags. + versionFromGit = "unknown" + // major version + majorFromGit string + // minor version + minorFromGit string + // build date in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ') + buildDate string + // state of git tree, either "clean" or "dirty" + gitTreeState string +) + +// Get returns the overall codebase version. It's for detecting +// what code a binary was built from. +func Get() version.Info { + return version.Info{ + Major: majorFromGit, + Minor: minorFromGit, + GitCommit: commitFromGit, + GitVersion: versionFromGit, + GitTreeState: gitTreeState, + BuildDate: buildDate, + GoVersion: runtime.Version(), + Compiler: runtime.Compiler, + Platform: fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH), + } +} + +func init() { + buildInfo := prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "openshift_build_info", + Help: "A metric with a constant '1' value labeled by major, minor, git commit & git version from which OpenShift was built.", + }, + []string{"major", "minor", "gitCommit", "gitVersion"}, + ) + buildInfo.WithLabelValues(majorFromGit, minorFromGit, commitFromGit, versionFromGit).Set(1) + + // we're ok with an error here for now because test-integration illegally runs the same process + prometheus.Register(buildInfo) +} diff --git a/vendor/github.com/openshift/library-go/pkg/config/leaderelection/leaderelection.go b/vendor/github.com/openshift/library-go/pkg/config/leaderelection/leaderelection.go index ea35758197..fa5d8d79f7 100644 --- a/vendor/github.com/openshift/library-go/pkg/config/leaderelection/leaderelection.go +++ b/vendor/github.com/openshift/library-go/pkg/config/leaderelection/leaderelection.go @@ -84,14 +84,28 @@ func ToConfigMapLeaderElection(clientConfig *rest.Config, config configv1.Leader func LeaderElectionDefaulting(config configv1.LeaderElection, defaultNamespace, defaultName string) configv1.LeaderElection { ret := *(&config).DeepCopy() + // We want to be able to tolerate 60s of kube-apiserver disruption without causing pod restarts. + // We want the graceful lease re-acquisition fairly quick to avoid waits on new deployments and other rollouts. + // We want a single set of guidance for nearly every lease in openshift. If you're special, we'll let you know. + // 1. clock skew tolerance is leaseDuration-renewDeadline == 30s + // 2. kube-apiserver downtime tolerance is == 78s + // lastRetry=floor(renewDeadline/retryPeriod)*retryPeriod == 104 + // downtimeTolerance = lastRetry-retryPeriod == 78s + // 3. worst non-graceful lease acquisition is leaseDuration+retryPeriod == 163s + // 4. 
worst graceful lease acquisition is retryPeriod == 26s if ret.LeaseDuration.Duration == 0 { - ret.LeaseDuration.Duration = 60 * time.Second + ret.LeaseDuration.Duration = 137 * time.Second } + if ret.RenewDeadline.Duration == 0 { - ret.RenewDeadline.Duration = 35 * time.Second + // this gives 107/26=4 retries and allows for 137-107=30 seconds of clock skew + // if the kube-apiserver is unavailable for 60s starting just before t=26 (the first renew), + // then we will retry on 26s intervals until t=104 (kube-apiserver came back up at 86), and there will + // be 33 seconds of extra time before the lease is lost. + ret.RenewDeadline.Duration = 107 * time.Second } if ret.RetryPeriod.Duration == 0 { - ret.RetryPeriod.Duration = 10 * time.Second + ret.RetryPeriod.Duration = 26 * time.Second } if len(ret.Namespace) == 0 { if len(defaultNamespace) > 0 { diff --git a/vendor/github.com/openshift/library-go/pkg/controller/controllercmd/builder.go b/vendor/github.com/openshift/library-go/pkg/controller/controllercmd/builder.go new file mode 100644 index 0000000000..08c829c22b --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/controller/controllercmd/builder.go @@ -0,0 +1,372 @@ +package controllercmd + +import ( + "context" + "fmt" + "io/ioutil" + "os" + "strings" + "sync" + "time" + + configv1 "github.com/openshift/api/config/v1" + operatorv1alpha1 "github.com/openshift/api/operator/v1alpha1" + "github.com/openshift/library-go/pkg/authorization/hardcodedauthorizer" + "github.com/openshift/library-go/pkg/config/client" + "github.com/openshift/library-go/pkg/config/configdefaults" + leaderelectionconverter "github.com/openshift/library-go/pkg/config/leaderelection" + "github.com/openshift/library-go/pkg/config/serving" + "github.com/openshift/library-go/pkg/controller/fileobserver" + "github.com/openshift/library-go/pkg/operator/events" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/version" + "k8s.io/apiserver/pkg/authorization/union" + genericapiserver "k8s.io/apiserver/pkg/server" + "k8s.io/apiserver/pkg/server/healthz" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/leaderelection" + "k8s.io/client-go/tools/record" + "k8s.io/component-base/metrics" + "k8s.io/component-base/metrics/legacyregistry" + "k8s.io/klog/v2" +) + +// StartFunc is the function to call on leader election start +type StartFunc func(context.Context, *ControllerContext) error + +type ControllerContext struct { + ComponentConfig *unstructured.Unstructured + + // KubeConfig provides the REST config with no content type (it will default to JSON). + // Use this config for CR resources. + KubeConfig *rest.Config + + // ProtoKubeConfig provides the REST config with "application/vnd.kubernetes.protobuf,application/json" content type. + // Note that this config might not be safe for CR resources, instead it should be used for other resources. + ProtoKubeConfig *rest.Config + + // EventRecorder is used to record events in controllers. + EventRecorder events.Recorder + + // Server is the GenericAPIServer serving healthz checks and debug info + Server *genericapiserver.GenericAPIServer + + // Namespace where the operator runs. Either specified on the command line or autodetected. + OperatorNamespace string +} + +// defaultObserverInterval specifies the default interval that file observer will do rehash the files it watches and react to any changes +// in those files. 
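+// With the five-second default below, a change to a watched file is therefore
+// noticed within roughly one interval.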
+var defaultObserverInterval = 5 * time.Second
+
+// ControllerBuilder allows the construction of a controller in optional pieces.
+type ControllerBuilder struct {
+	kubeAPIServerConfigFile *string
+	clientOverrides         *client.ClientConnectionOverrides
+	leaderElection          *configv1.LeaderElection
+	fileObserver            fileobserver.Observer
+	fileObserverReactorFn   func(file string, action fileobserver.ActionType) error
+	eventRecorderOptions    record.CorrelatorOptions
+	componentOwnerReference *corev1.ObjectReference
+
+	startFunc          StartFunc
+	componentName      string
+	componentNamespace string
+	instanceIdentity   string
+	observerInterval   time.Duration
+
+	servingInfo          *configv1.HTTPServingInfo
+	authenticationConfig *operatorv1alpha1.DelegatedAuthentication
+	authorizationConfig  *operatorv1alpha1.DelegatedAuthorization
+	healthChecks         []healthz.HealthChecker
+
+	versionInfo *version.Info
+
+	// nonZeroExitFn holds a function that exits the process with a non-zero code.
+	// This stub exists for unit tests, where we can check whether graceful termination works properly.
+	// The default function calls klog.Warning(args) and os.Exit(1).
+	nonZeroExitFn func(args ...interface{})
+}
+
+// NewController returns a builder struct for constructing the command you want to run.
+func NewController(componentName string, startFunc StartFunc) *ControllerBuilder {
+	return &ControllerBuilder{
+		startFunc:        startFunc,
+		componentName:    componentName,
+		observerInterval: defaultObserverInterval,
+		nonZeroExitFn: func(args ...interface{}) {
+			klog.Warning(args...)
+			os.Exit(1)
+		},
+	}
+}
+
+// WithRestartOnChange enables a file observer controller loop that watches the specified files. If a change
+// to one of those files is detected, the given channel is closed (allowing a graceful shutdown elsewhere).
+func (b *ControllerBuilder) WithRestartOnChange(stopCh chan<- struct{}, startingFileContent map[string][]byte, files ...string) *ControllerBuilder {
+	if len(files) == 0 {
+		return b
+	}
+	if b.fileObserver == nil {
+		observer, err := fileobserver.NewObserver(b.observerInterval)
+		if err != nil {
+			panic(err)
+		}
+		b.fileObserver = observer
+	}
+	var once sync.Once
+
+	b.fileObserverReactorFn = func(filename string, action fileobserver.ActionType) error {
+		once.Do(func() {
+			klog.Warning(fmt.Sprintf("Restart triggered because of %s", action.String(filename)))
+			close(stopCh)
+		})
+		return nil
+	}
+
+	b.fileObserver.AddReactor(b.fileObserverReactorFn, startingFileContent, files...)
+ return b +} + +func (b *ControllerBuilder) WithComponentNamespace(ns string) *ControllerBuilder { + b.componentNamespace = ns + return b +} + +// WithLeaderElection adds leader election options +func (b *ControllerBuilder) WithLeaderElection(leaderElection configv1.LeaderElection, defaultNamespace, defaultName string) *ControllerBuilder { + if leaderElection.Disable { + return b + } + + defaulted := leaderelectionconverter.LeaderElectionDefaulting(leaderElection, defaultNamespace, defaultName) + b.leaderElection = &defaulted + return b +} + +// WithVersion accepts a getting that provide binary version information that is used to report build_info information to prometheus +func (b *ControllerBuilder) WithVersion(info version.Info) *ControllerBuilder { + b.versionInfo = &info + return b +} + +// WithServer adds a server that provides metrics and healthz +func (b *ControllerBuilder) WithServer(servingInfo configv1.HTTPServingInfo, authenticationConfig operatorv1alpha1.DelegatedAuthentication, authorizationConfig operatorv1alpha1.DelegatedAuthorization) *ControllerBuilder { + b.servingInfo = servingInfo.DeepCopy() + configdefaults.SetRecommendedHTTPServingInfoDefaults(b.servingInfo) + b.authenticationConfig = &authenticationConfig + b.authorizationConfig = &authorizationConfig + return b +} + +// WithHealthChecks adds a list of healthchecks to the server +func (b *ControllerBuilder) WithHealthChecks(healthChecks ...healthz.HealthChecker) *ControllerBuilder { + b.healthChecks = append(b.healthChecks, healthChecks...) + return b +} + +// WithKubeConfigFile sets an optional kubeconfig file. inclusterconfig will be used if filename is empty +func (b *ControllerBuilder) WithKubeConfigFile(kubeConfigFilename string, defaults *client.ClientConnectionOverrides) *ControllerBuilder { + b.kubeAPIServerConfigFile = &kubeConfigFilename + b.clientOverrides = defaults + return b +} + +// WithInstanceIdentity sets the instance identity to use if you need something special. The default is just a UID which is +// usually fine for a pod. +func (b *ControllerBuilder) WithInstanceIdentity(identity string) *ControllerBuilder { + b.instanceIdentity = identity + return b +} + +// WithEventRecorderOptions allows to override the default Kubernetes event recorder correlator options. +// This is needed if the binary is sending a lot of events. +// Using events.DefaultOperatorEventRecorderOptions here makes a good default for normal operator binary. +func (b *ControllerBuilder) WithEventRecorderOptions(options record.CorrelatorOptions) *ControllerBuilder { + b.eventRecorderOptions = options + return b +} + +// WithComponentOwnerReference overrides controller reference resolution for event recording +func (b *ControllerBuilder) WithComponentOwnerReference(reference *corev1.ObjectReference) *ControllerBuilder { + b.componentOwnerReference = reference + return b +} + +// Run starts your controller for you. 
It uses leader election if you asked, otherwise it directly calls you +func (b *ControllerBuilder) Run(ctx context.Context, config *unstructured.Unstructured) error { + clientConfig, err := b.getClientConfig() + if err != nil { + return err + } + + if b.fileObserver != nil { + go b.fileObserver.Run(ctx.Done()) + } + + kubeClient := kubernetes.NewForConfigOrDie(clientConfig) + namespace, err := b.getComponentNamespace() + if err != nil { + klog.Warningf("unable to identify the current namespace for events: %v", err) + } + controllerRef := b.componentOwnerReference + + if controllerRef == nil { + controllerRef, err = events.GetControllerReferenceForCurrentPod(kubeClient, namespace, nil) + if err != nil { + klog.Warningf("unable to get owner reference (falling back to namespace): %v", err) + } + } + eventRecorder := events.NewKubeRecorderWithOptions(kubeClient.CoreV1().Events(namespace), b.eventRecorderOptions, b.componentName, controllerRef) + + utilruntime.PanicHandlers = append(utilruntime.PanicHandlers, func(r interface{}) { + eventRecorder.Warningf(fmt.Sprintf("%sPanic", strings.Title(b.componentName)), "Panic observed: %v", r) + }) + + // if there is file observer defined for this command, add event into default reaction function. + if b.fileObserverReactorFn != nil { + originalFileObserverReactorFn := b.fileObserverReactorFn + b.fileObserverReactorFn = func(file string, action fileobserver.ActionType) error { + eventRecorder.Warningf("OperatorRestart", "Restarted because of %s", action.String(file)) + return originalFileObserverReactorFn(file, action) + } + } + + // report the binary version metrics to prometheus + if b.versionInfo != nil { + buildInfo := metrics.NewGaugeVec( + &metrics.GaugeOpts{ + Name: strings.Replace(namespace, "-", "_", -1) + "_build_info", + Help: "A metric with a constant '1' value labeled by major, minor, git version, git commit, git tree state, build date, Go version, " + + "and compiler from which " + b.componentName + " was built, and platform on which it is running.", + StabilityLevel: metrics.ALPHA, + }, + []string{"major", "minor", "gitVersion", "gitCommit", "gitTreeState", "buildDate", "goVersion", "compiler", "platform"}, + ) + legacyregistry.MustRegister(buildInfo) + buildInfo.WithLabelValues(b.versionInfo.Major, b.versionInfo.Minor, b.versionInfo.GitVersion, b.versionInfo.GitCommit, b.versionInfo.GitTreeState, b.versionInfo.BuildDate, b.versionInfo.GoVersion, + b.versionInfo.Compiler, b.versionInfo.Platform).Set(1) + klog.Infof("%s version %s-%s", b.componentName, b.versionInfo.GitVersion, b.versionInfo.GitCommit) + } + + kubeConfig := "" + if b.kubeAPIServerConfigFile != nil { + kubeConfig = *b.kubeAPIServerConfigFile + } + + var server *genericapiserver.GenericAPIServer + if b.servingInfo != nil { + serverConfig, err := serving.ToServerConfig(ctx, *b.servingInfo, *b.authenticationConfig, *b.authorizationConfig, kubeConfig) + if err != nil { + return err + } + serverConfig.Authorization.Authorizer = union.New( + // prefix the authorizer with the permissions for metrics scraping which are well known. + // openshift RBAC policy will always allow this user to read metrics. + hardcodedauthorizer.NewHardCodedMetricsAuthorizer(), + serverConfig.Authorization.Authorizer, + ) + serverConfig.HealthzChecks = append(serverConfig.HealthzChecks, b.healthChecks...) 
+ + server, err = serverConfig.Complete(nil).New(b.componentName, genericapiserver.NewEmptyDelegate()) + if err != nil { + return err + } + + go func() { + if err := server.PrepareRun().Run(ctx.Done()); err != nil { + klog.Fatal(err) + } + klog.Info("server exited") + }() + } + + protoConfig := rest.CopyConfig(clientConfig) + protoConfig.AcceptContentTypes = "application/vnd.kubernetes.protobuf,application/json" + protoConfig.ContentType = "application/vnd.kubernetes.protobuf" + + controllerContext := &ControllerContext{ + ComponentConfig: config, + KubeConfig: clientConfig, + ProtoKubeConfig: protoConfig, + EventRecorder: eventRecorder, + Server: server, + OperatorNamespace: namespace, + } + + if b.leaderElection == nil { + if err := b.startFunc(ctx, controllerContext); err != nil { + return err + } + return nil + } + + // ensure blocking TCP connections don't block the leader election + leaderConfig := rest.CopyConfig(protoConfig) + leaderConfig.Timeout = b.leaderElection.RenewDeadline.Duration + + leaderElection, err := leaderelectionconverter.ToConfigMapLeaderElection(leaderConfig, *b.leaderElection, b.componentName, b.instanceIdentity) + if err != nil { + return err + } + + // 10s is the graceful termination time we give the controllers to finish their workers. + // when this time pass, we exit with non-zero code, killing all controller workers. + // NOTE: The pod must set the termination graceful time. + leaderElection.Callbacks.OnStartedLeading = b.getOnStartedLeadingFunc(controllerContext, 10*time.Second) + + leaderelection.RunOrDie(ctx, leaderElection) + return nil +} + +func (b ControllerBuilder) getOnStartedLeadingFunc(controllerContext *ControllerContext, gracefulTerminationDuration time.Duration) func(ctx context.Context) { + return func(ctx context.Context) { + stoppedCh := make(chan struct{}) + go func() { + defer close(stoppedCh) + if err := b.startFunc(ctx, controllerContext); err != nil { + b.nonZeroExitFn(fmt.Sprintf("graceful termination failed, controllers failed with error: %v", err)) + } + }() + + select { + case <-ctx.Done(): // context closed means the process likely received signal to terminate + controllerContext.EventRecorder.Shutdown() + case <-stoppedCh: + // if context was not cancelled (it is not "done"), but the startFunc terminated, it means it terminated prematurely + // when this happen, it means the controllers terminated without error. 
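+			// A nil ctx.Err() confirms the parent context is still alive, i.e. no shutdown was requested from above.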
+ if ctx.Err() == nil { + b.nonZeroExitFn("graceful termination failed, controllers terminated prematurely") + } + } + + select { + case <-time.After(gracefulTerminationDuration): // when context was closed above, give controllers extra time to terminate gracefully + b.nonZeroExitFn(fmt.Sprintf("graceful termination failed, some controllers failed to shutdown in %s", gracefulTerminationDuration)) + case <-stoppedCh: // stoppedCh here means the controllers finished termination and we exit 0 + } + } +} + +func (b *ControllerBuilder) getComponentNamespace() (string, error) { + if len(b.componentNamespace) > 0 { + return b.componentNamespace, nil + } + nsBytes, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace") + if err != nil { + return "openshift-config-managed", err + } + return string(nsBytes), nil +} + +func (b *ControllerBuilder) getClientConfig() (*rest.Config, error) { + kubeconfig := "" + if b.kubeAPIServerConfigFile != nil { + kubeconfig = *b.kubeAPIServerConfigFile + } + + return client.GetKubeConfigOrInClusterConfig(kubeconfig, b.clientOverrides) +} diff --git a/vendor/github.com/openshift/library-go/pkg/controller/controllercmd/cmd.go b/vendor/github.com/openshift/library-go/pkg/controller/controllercmd/cmd.go new file mode 100644 index 0000000000..5196e350a5 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/controller/controllercmd/cmd.go @@ -0,0 +1,295 @@ +package controllercmd + +import ( + "context" + "fmt" + "io/ioutil" + "math/rand" + "os" + "path/filepath" + "time" + + "github.com/spf13/cobra" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/version" + "k8s.io/apiserver/pkg/server" + "k8s.io/component-base/logs" + "k8s.io/klog/v2" + + operatorv1alpha1 "github.com/openshift/api/operator/v1alpha1" + + "github.com/openshift/library-go/pkg/config/configdefaults" + "github.com/openshift/library-go/pkg/controller/fileobserver" + "github.com/openshift/library-go/pkg/crypto" + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/serviceability" + + // for metrics + _ "github.com/openshift/library-go/pkg/controller/metrics" +) + +// ControllerCommandConfig holds values required to construct a command to run. +type ControllerCommandConfig struct { + componentName string + startFunc StartFunc + version version.Info + + basicFlags *ControllerFlags + + // DisableServing disables serving metrics, debug and health checks and so on. 
+ DisableServing bool + + // DisableLeaderElection allows leader election to be suspended + DisableLeaderElection bool + + ComponentOwnerReference *corev1.ObjectReference +} + +// NewControllerConfig returns a new ControllerCommandConfig which can be used to wire up all the boiler plate of a controller +// TODO add more methods around wiring health checks and the like +func NewControllerCommandConfig(componentName string, version version.Info, startFunc StartFunc) *ControllerCommandConfig { + return &ControllerCommandConfig{ + startFunc: startFunc, + componentName: componentName, + version: version, + + basicFlags: NewControllerFlags(), + + DisableServing: false, + DisableLeaderElection: false, + } +} + +// WithComponentOwnerReference overrides controller reference resolution for event recording +func (c *ControllerCommandConfig) WithComponentOwnerReference(reference *corev1.ObjectReference) *ControllerCommandConfig { + c.ComponentOwnerReference = reference + return c +} + +// NewCommand returns a new command that a caller must set the Use and Descriptions on. It wires default log, profiling, +// leader election and other "normal" behaviors. +// Deprecated: Use the NewCommandWithContext instead, this is here to be less disturbing for existing usages. +func (c *ControllerCommandConfig) NewCommand() *cobra.Command { + return c.NewCommandWithContext(context.TODO()) + +} + +// NewCommandWithContext returns a new command that a caller must set the Use and Descriptions on. It wires default log, profiling, +// leader election and other "normal" behaviors. +// The context passed will be passed down to controller loops and observers and cancelled on SIGTERM and SIGINT signals. +func (c *ControllerCommandConfig) NewCommandWithContext(ctx context.Context) *cobra.Command { + cmd := &cobra.Command{ + Run: func(cmd *cobra.Command, args []string) { + // boiler plate for the "normal" command + rand.Seed(time.Now().UTC().UnixNano()) + logs.InitLogs() + + // handle SIGTERM and SIGINT by cancelling the context. + shutdownCtx, cancel := context.WithCancel(ctx) + shutdownHandler := server.SetupSignalHandler() + go func() { + defer cancel() + <-shutdownHandler + klog.Infof("Received SIGTERM or SIGINT signal, shutting down controller.") + }() + + defer logs.FlushLogs() + defer serviceability.BehaviorOnPanic(os.Getenv("OPENSHIFT_ON_PANIC"), c.version)() + defer serviceability.Profile(os.Getenv("OPENSHIFT_PROFILE")).Stop() + + serviceability.StartProfiler() + + if err := c.basicFlags.Validate(); err != nil { + klog.Fatal(err) + } + + ctx, terminate := context.WithCancel(shutdownCtx) + defer terminate() + + if len(c.basicFlags.TerminateOnFiles) > 0 { + // setup file observer to terminate when given files change + obs, err := fileobserver.NewObserver(10 * time.Second) + if err != nil { + klog.Fatal(err) + } + files := map[string][]byte{} + for _, fn := range c.basicFlags.TerminateOnFiles { + fileBytes, err := ioutil.ReadFile(fn) + if err != nil { + klog.Warningf("Unable to read initial content of %q: %v", fn, err) + continue // intentionally ignore errors + } + files[fn] = fileBytes + } + obs.AddReactor(func(filename string, action fileobserver.ActionType) error { + klog.Infof("exiting because %q changed", filename) + terminate() + return nil + }, files, c.basicFlags.TerminateOnFiles...) + + go obs.Run(shutdownHandler) + } + + if err := c.StartController(ctx); err != nil { + klog.Fatal(err) + } + }, + } + + c.basicFlags.AddFlags(cmd) + + return cmd +} + +// Config returns the configuration of this command. 
Use StartController if you don't need to customize the default operator. +// This method does not modify the receiver. +func (c *ControllerCommandConfig) Config() (*unstructured.Unstructured, *operatorv1alpha1.GenericOperatorConfig, []byte, error) { + configContent, unstructuredConfig, err := c.basicFlags.ToConfigObj() + if err != nil { + return nil, nil, nil, err + } + config := &operatorv1alpha1.GenericOperatorConfig{} + if unstructuredConfig != nil { + // make a copy we can mutate + configCopy := unstructuredConfig.DeepCopy() + // force the config to our version to read it + configCopy.SetGroupVersionKind(operatorv1alpha1.GroupVersion.WithKind("GenericOperatorConfig")) + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(configCopy.Object, config); err != nil { + return nil, nil, nil, err + } + } + return unstructuredConfig, config, configContent, nil +} + +func hasServiceServingCerts(certDir string) bool { + if _, err := os.Stat(filepath.Join(certDir, "tls.crt")); os.IsNotExist(err) { + return false + } + if _, err := os.Stat(filepath.Join(certDir, "tls.key")); os.IsNotExist(err) { + return false + } + return true +} + +// AddDefaultRotationToConfig starts the provided builder with the default rotation set (config + serving info). Use StartController if +// you do not need to customize the controller builder. This method modifies config with self-signed default cert locations if +// necessary. +func (c *ControllerCommandConfig) AddDefaultRotationToConfig(config *operatorv1alpha1.GenericOperatorConfig, configContent []byte) (map[string][]byte, []string, error) { + certDir := "/var/run/secrets/serving-cert" + + observedFiles := []string{ + // We observe these, so we they are created or modified by service serving cert signer, we can react and restart the process + // that will pick these up instead of generating the self-signed certs. + // NOTE: We are not observing the temporary, self-signed certificates. + filepath.Join(certDir, "tls.crt"), + filepath.Join(certDir, "tls.key"), + } + // startingFileContent holds hardcoded starting content. If we generate our own certificates, then we want to specify empty + // content to avoid a starting race. When we consume them, the race is really about as good as we can do since we don't know + // what's actually been read. + startingFileContent := map[string][]byte{} + + // Since provision of a config filename is optional, only observe when one is provided. + if len(c.basicFlags.ConfigFile) > 0 { + observedFiles = append(observedFiles, c.basicFlags.ConfigFile) + startingFileContent[c.basicFlags.ConfigFile] = configContent + } + + // if we don't have any serving cert/key pairs specified and the defaults are not present, generate a self-signed set + // TODO maybe this should be optional? It's a little difficult to come up with a scenario where this is worse than nothing though. + if len(config.ServingInfo.CertFile) == 0 && len(config.ServingInfo.KeyFile) == 0 { + servingInfoCopy := config.ServingInfo.DeepCopy() + configdefaults.SetRecommendedHTTPServingInfoDefaults(servingInfoCopy) + + if hasServiceServingCerts(certDir) { + klog.Infof("Using service-serving-cert provided certificates") + config.ServingInfo.CertFile = filepath.Join(certDir, "tls.crt") + config.ServingInfo.KeyFile = filepath.Join(certDir, "tls.key") + } else { + klog.Warningf("Using insecure, self-signed certificates") + // If we generate our own certificates, then we want to specify empty content to avoid a starting race. 
This way, + // if any change comes in, we will properly restart + startingFileContent[filepath.Join(certDir, "tls.crt")] = []byte{} + startingFileContent[filepath.Join(certDir, "tls.key")] = []byte{} + + temporaryCertDir, err := ioutil.TempDir("", "serving-cert-") + if err != nil { + return nil, nil, err + } + signerName := fmt.Sprintf("%s-signer@%d", c.componentName, time.Now().Unix()) + ca, err := crypto.MakeSelfSignedCA( + filepath.Join(temporaryCertDir, "serving-signer.crt"), + filepath.Join(temporaryCertDir, "serving-signer.key"), + filepath.Join(temporaryCertDir, "serving-signer.serial"), + signerName, + 0, + ) + if err != nil { + return nil, nil, err + } + + // force the values to be set to where we are writing the certs + config.ServingInfo.CertFile = filepath.Join(temporaryCertDir, "tls.crt") + config.ServingInfo.KeyFile = filepath.Join(temporaryCertDir, "tls.key") + // nothing can trust this, so we don't really care about hostnames + servingCert, err := ca.MakeServerCert(sets.NewString("localhost"), 30) + if err != nil { + return nil, nil, err + } + if err := servingCert.WriteCertConfigFile(config.ServingInfo.CertFile, config.ServingInfo.KeyFile); err != nil { + return nil, nil, err + } + } + } + return startingFileContent, observedFiles, nil +} + +// StartController runs the controller. This is the recommend entrypoint when you don't need +// to customize the builder. +func (c *ControllerCommandConfig) StartController(ctx context.Context) error { + unstructuredConfig, config, configContent, err := c.Config() + if err != nil { + return err + } + + startingFileContent, observedFiles, err := c.AddDefaultRotationToConfig(config, configContent) + if err != nil { + return err + } + + if len(c.basicFlags.BindAddress) != 0 { + config.ServingInfo.BindAddress = c.basicFlags.BindAddress + } + + exitOnChangeReactorCh := make(chan struct{}) + controllerCtx, cancel := context.WithCancel(ctx) + go func() { + select { + case <-exitOnChangeReactorCh: + cancel() + case <-ctx.Done(): + cancel() + } + }() + + config.LeaderElection.Disable = c.DisableLeaderElection + + builder := NewController(c.componentName, c.startFunc). + WithKubeConfigFile(c.basicFlags.KubeConfigFile, nil). + WithComponentNamespace(c.basicFlags.Namespace). + WithLeaderElection(config.LeaderElection, c.basicFlags.Namespace, c.componentName+"-lock"). + WithVersion(c.version). + WithEventRecorderOptions(events.RecommendedClusterSingletonCorrelatorOptions()). + WithRestartOnChange(exitOnChangeReactorCh, startingFileContent, observedFiles...). 
+ WithComponentOwnerReference(c.ComponentOwnerReference) + + if !c.DisableServing { + builder = builder.WithServer(config.ServingInfo, config.Authentication, config.Authorization) + } + + return builder.Run(controllerCtx, unstructuredConfig) +} diff --git a/vendor/github.com/openshift/library-go/pkg/controller/controllercmd/flags.go b/vendor/github.com/openshift/library-go/pkg/controller/controllercmd/flags.go new file mode 100644 index 0000000000..d83fe7d451 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/controller/controllercmd/flags.go @@ -0,0 +1,138 @@ +package controllercmd + +import ( + "encoding/json" + "fmt" + "io/ioutil" + + "github.com/spf13/cobra" + + "github.com/openshift/library-go/pkg/config/client" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + kyaml "k8s.io/apimachinery/pkg/util/yaml" + "k8s.io/client-go/rest" +) + +// ControllerFlags provides the "normal" controller flags +type ControllerFlags struct { + // ConfigFile hold the configfile to load + ConfigFile string + // KubeConfigFile points to a kubeconfig file if you don't want to use the in cluster config + KubeConfigFile string + // Namespace points to a base namespace for the controller and related events + Namespace string + // BindAddress is the ip:port to serve on + BindAddress string + // TerminateOnFiles is a list of files. If any of these changes, the process terminates. + TerminateOnFiles []string +} + +// NewControllerFlags returns flags with default values set +func NewControllerFlags() *ControllerFlags { + return &ControllerFlags{} +} + +// Validate makes sure the required flags are specified and no illegal combinations are found +func (o *ControllerFlags) Validate() error { + // everything is optional currently + return nil +} + +// AddFlags register and binds the default flags +func (f *ControllerFlags) AddFlags(cmd *cobra.Command) { + flags := cmd.Flags() + // This command only supports reading from config + flags.StringVar(&f.ConfigFile, "config", f.ConfigFile, "Location of the master configuration file to run from.") + cmd.MarkFlagFilename("config", "yaml", "yml") + flags.StringVar(&f.KubeConfigFile, "kubeconfig", f.KubeConfigFile, "Location of the master configuration file to run from.") + cmd.MarkFlagFilename("kubeconfig", "kubeconfig") + flags.StringVar(&f.Namespace, "namespace", f.Namespace, "Namespace where the controller is running. Auto-detected if run in cluster.") + flags.StringVar(&f.BindAddress, "listen", f.BindAddress, "The ip:port to serve on.") + flags.StringArrayVar(&f.TerminateOnFiles, "terminate-on-files", f.TerminateOnFiles, "A list of files. If one of them changes, the process will terminate.") +} + +// ToConfigObj given completed flags, returns a config object for the flag that was specified. 
+// TODO versions goes away in 1.11 +func (f *ControllerFlags) ToConfigObj() ([]byte, *unstructured.Unstructured, error) { + // no file means empty, not err + if len(f.ConfigFile) == 0 { + return nil, nil, nil + } + + content, err := ioutil.ReadFile(f.ConfigFile) + if err != nil { + return nil, nil, err + } + // empty file means empty, not err + if len(content) == 0 { + return nil, nil, err + } + + data, err := kyaml.ToJSON(content) + if err != nil { + return nil, nil, err + } + uncastObj, err := runtime.Decode(unstructured.UnstructuredJSONScheme, data) + if err != nil { + return nil, nil, err + } + + return content, uncastObj.(*unstructured.Unstructured), nil +} + +// ToClientConfig given completed flags, returns a rest.Config. overrides are optional +func (f *ControllerFlags) ToClientConfig(overrides *client.ClientConnectionOverrides) (*rest.Config, error) { + return client.GetKubeConfigOrInClusterConfig(f.KubeConfigFile, overrides) +} + +// ReadYAML decodes a runtime.Object from the provided scheme +// TODO versions goes away with more complete scheme in 1.11 +func ReadYAML(data []byte, configScheme *runtime.Scheme, versions ...schema.GroupVersion) (runtime.Object, error) { + data, err := kyaml.ToJSON(data) + if err != nil { + return nil, err + } + configCodecFactory := serializer.NewCodecFactory(configScheme) + obj, err := runtime.Decode(configCodecFactory.UniversalDecoder(versions...), data) + if err != nil { + return nil, captureSurroundingJSONForError("error reading config: ", data, err) + } + return obj, err +} + +// ReadYAMLFile read a file and decodes a runtime.Object from the provided scheme +func ReadYAMLFile(filename string, configScheme *runtime.Scheme, versions ...schema.GroupVersion) (runtime.Object, error) { + data, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + obj, err := ReadYAML(data, configScheme, versions...) + if err != nil { + return nil, fmt.Errorf("could not load config file %q due to an error: %v", filename, err) + } + return obj, err +} + +// TODO: we ultimately want a better decoder for JSON that allows us exact line numbers and better +// surrounding text description. This should be removed / replaced when that happens. 
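+// As a behavior sketch (hypothetical input): a *json.SyntaxError at offset 30
+// yields an error of the form
+// "error reading config: <syntax error> (found near '<up to 20 bytes on either side of the offset>')".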
+func captureSurroundingJSONForError(prefix string, data []byte, err error) error { + if syntaxErr, ok := err.(*json.SyntaxError); err != nil && ok { + offset := syntaxErr.Offset + begin := offset - 20 + if begin < 0 { + begin = 0 + } + end := offset + 20 + if end > int64(len(data)) { + end = int64(len(data)) + } + return fmt.Errorf("%s%v (found near '%s')", prefix, err, string(data[begin:end])) + } + if err != nil { + return fmt.Errorf("%s%v", prefix, err) + } + return err +} diff --git a/vendor/github.com/openshift/library-go/pkg/controller/factory/base_controller.go b/vendor/github.com/openshift/library-go/pkg/controller/factory/base_controller.go new file mode 100644 index 0000000000..e12ae1bfca --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/controller/factory/base_controller.go @@ -0,0 +1,280 @@ +package factory + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + "github.com/robfig/cron" + apierrors "k8s.io/apimachinery/pkg/api/errors" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog/v2" + + operatorv1 "github.com/openshift/api/operator/v1" + "github.com/openshift/library-go/pkg/operator/management" + "github.com/openshift/library-go/pkg/operator/v1helpers" + operatorv1helpers "github.com/openshift/library-go/pkg/operator/v1helpers" +) + +// SyntheticRequeueError can be returned from sync() in case of forcing a sync() retry artificially. +// This can be also done by re-adding the key to queue, but this is cheaper and more convenient. +var SyntheticRequeueError = errors.New("synthetic requeue request") + +var defaultCacheSyncTimeout = 10 * time.Minute + +// baseController represents generic Kubernetes controller boiler-plate +type baseController struct { + name string + cachesToSync []cache.InformerSynced + sync func(ctx context.Context, controllerContext SyncContext) error + syncContext SyncContext + syncDegradedClient operatorv1helpers.OperatorClient + resyncEvery time.Duration + resyncSchedules []cron.Schedule + postStartHooks []PostStartHook + cacheSyncTimeout time.Duration +} + +var _ Controller = &baseController{} + +func (c baseController) Name() string { + return c.name +} + +type scheduledJob struct { + queue workqueue.RateLimitingInterface + name string +} + +func newScheduledJob(name string, queue workqueue.RateLimitingInterface) cron.Job { + return &scheduledJob{ + queue: queue, + name: name, + } +} + +func (s *scheduledJob) Run() { + klog.V(4).Infof("Triggering scheduled %q controller run", s.name) + s.queue.Add(DefaultQueueKey) +} + +func waitForNamedCacheSync(controllerName string, stopCh <-chan struct{}, cacheSyncs ...cache.InformerSynced) error { + klog.Infof("Waiting for caches to sync for %s", controllerName) + + if !cache.WaitForCacheSync(stopCh, cacheSyncs...) { + return fmt.Errorf("unable to sync caches for %s", controllerName) + } + + klog.Infof("Caches are synced for %s ", controllerName) + + return nil +} + +func (c *baseController) Run(ctx context.Context, workers int) { + // HandleCrash recovers panics + defer utilruntime.HandleCrash(c.degradedPanicHandler) + + // give caches 10 minutes to sync + cacheSyncCtx, cacheSyncCancel := context.WithTimeout(ctx, c.cacheSyncTimeout) + defer cacheSyncCancel() + err := waitForNamedCacheSync(c.name, cacheSyncCtx.Done(), c.cachesToSync...) + if err != nil { + select { + case <-ctx.Done(): + // Exit gracefully because the controller was requested to stop. 
+ return + default: + // If caches did not sync after 10 minutes, it has taken oddly long and + // we should provide feedback. Since the control loops will never start, + // it is safer to exit with a good message than to continue with a dead loop. + // TODO: Consider making this behavior configurable. + klog.Exit(err) + } + } + + var workerWg sync.WaitGroup + defer func() { + defer klog.Infof("All %s workers have been terminated", c.name) + workerWg.Wait() + }() + + // queueContext is used to track and initiate queue shutdown + queueContext, queueContextCancel := context.WithCancel(context.TODO()) + + for i := 1; i <= workers; i++ { + klog.Infof("Starting #%d worker of %s controller ...", i, c.name) + workerWg.Add(1) + go func() { + defer func() { + klog.Infof("Shutting down worker of %s controller ...", c.name) + workerWg.Done() + }() + c.runWorker(queueContext) + }() + } + + // if scheduled run is requested, run the cron scheduler + if c.resyncSchedules != nil { + scheduler := cron.New() + for _, s := range c.resyncSchedules { + scheduler.Schedule(s, newScheduledJob(c.name, c.syncContext.Queue())) + } + scheduler.Start() + defer scheduler.Stop() + } + + // runPeriodicalResync is independent from queue + if c.resyncEvery > 0 { + workerWg.Add(1) + go func() { + defer workerWg.Done() + c.runPeriodicalResync(ctx, c.resyncEvery) + }() + } + + // run post-start hooks (custom triggers, etc.) + if len(c.postStartHooks) > 0 { + var hookWg sync.WaitGroup + defer func() { + hookWg.Wait() // wait for the post-start hooks + klog.Infof("All %s post start hooks have been terminated", c.name) + }() + for i := range c.postStartHooks { + hookWg.Add(1) + go func(index int) { + defer hookWg.Done() + if err := c.postStartHooks[index](ctx, c.syncContext); err != nil { + klog.Warningf("%s controller post start hook error: %v", c.name, err) + } + }(i) + } + } + + // Handle controller shutdown + + <-ctx.Done() // wait for controller context to be cancelled + c.syncContext.Queue().ShutDown() // shutdown the controller queue first + queueContextCancel() // cancel the queue context, which tell workers to initiate shutdown + + // Wait for all workers to finish their job. + // at this point the Run() can hang and caller have to implement the logic that will kill + // this controller (SIGKILL). + klog.Infof("Shutting down %s ...", c.name) +} + +func (c *baseController) Sync(ctx context.Context, syncCtx SyncContext) error { + return c.sync(ctx, syncCtx) +} + +func (c *baseController) runPeriodicalResync(ctx context.Context, interval time.Duration) { + if interval == 0 { + return + } + go wait.UntilWithContext(ctx, func(ctx context.Context) { + c.syncContext.Queue().Add(DefaultQueueKey) + }, interval) +} + +// runWorker runs a single worker +// The worker is asked to terminate when the passed context is cancelled and is given terminationGraceDuration time +// to complete its shutdown. +func (c *baseController) runWorker(queueCtx context.Context) { + wait.UntilWithContext( + queueCtx, + func(queueCtx context.Context) { + defer utilruntime.HandleCrash(c.degradedPanicHandler) + for { + select { + case <-queueCtx.Done(): + return + default: + c.processNextWorkItem(queueCtx) + } + } + }, + 1*time.Second) +} + +// reconcile wraps the sync() call and if operator client is set, it handle the degraded condition if sync() returns an error. 
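+// In sketch form: any sync() error is reported through the <name>Degraded
+// condition; a NotFound from that status update is tolerated for removable
+// operators, in which case the original sync() error is returned instead.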
+func (c *baseController) reconcile(ctx context.Context, syncCtx SyncContext) error {
+	err := c.sync(ctx, syncCtx)
+	degradedErr := c.reportDegraded(ctx, err)
+	if apierrors.IsNotFound(degradedErr) && management.IsOperatorRemovable() {
+		// The operator tolerates a missing CR, so don't report it upward.
+		return err
+	}
+	return degradedErr
+}
+
+// Because we want to go degraded on failures, degradedPanicHandler catches potential panics and converts them into a degraded status.
+func (c *baseController) degradedPanicHandler(panicVal interface{}) {
+	if c.syncDegradedClient == nil {
+		// if we don't have a client for reporting the degraded condition, then let the existing panic handler do the work
+		return
+	}
+	_ = c.reportDegraded(context.TODO(), fmt.Errorf("panic caught:\n%v", panicVal))
+}
+
+// reportDegraded updates status with an indication of degraded-ness.
+func (c *baseController) reportDegraded(ctx context.Context, reportedError error) error {
+	if c.syncDegradedClient == nil {
+		return reportedError
+	}
+	if reportedError != nil {
+		_, _, updateErr := v1helpers.UpdateStatus(c.syncDegradedClient, v1helpers.UpdateConditionFn(operatorv1.OperatorCondition{
+			Type:    c.name + "Degraded",
+			Status:  operatorv1.ConditionTrue,
+			Reason:  "SyncError",
+			Message: reportedError.Error(),
+		}))
+		if updateErr != nil {
+			klog.Warningf("Updating status of %q failed: %v", c.Name(), updateErr)
+		}
+		return reportedError
+	}
+	_, _, updateErr := v1helpers.UpdateStatus(c.syncDegradedClient,
+		v1helpers.UpdateConditionFn(operatorv1.OperatorCondition{
+			Type:   c.name + "Degraded",
+			Status: operatorv1.ConditionFalse,
+			Reason: "AsExpected",
+		}))
+	return updateErr
+}
+
+func (c *baseController) processNextWorkItem(queueCtx context.Context) {
+	key, quit := c.syncContext.Queue().Get()
+	if quit {
+		return
+	}
+	defer c.syncContext.Queue().Done(key)
+
+	syncCtx := c.syncContext.(syncContext)
+	var ok bool
+	syncCtx.queueKey, ok = key.(string)
+	if !ok {
+		utilruntime.HandleError(fmt.Errorf("%q controller failed to process key %q (not a string)", c.name, key))
+		return
+	}
+
+	if err := c.reconcile(queueCtx, syncCtx); err != nil {
+		if err == SyntheticRequeueError {
+			// logging this helps detect wedged controllers with missing prerequisites
+			klog.V(5).Infof("%q controller requested synthetic requeue with key %q", c.name, key)
+		} else {
+			if klog.V(4).Enabled() || key != "key" {
+				utilruntime.HandleError(fmt.Errorf("%q controller failed to sync %q, err: %w", c.name, key, err))
+			} else {
+				utilruntime.HandleError(fmt.Errorf("%s reconciliation failed: %w", c.name, err))
+			}
+		}
+		c.syncContext.Queue().AddRateLimited(key)
+		return
+	}
+
+	c.syncContext.Queue().Forget(key)
+} diff --git a/vendor/github.com/openshift/library-go/pkg/controller/factory/controller_context.go b/vendor/github.com/openshift/library-go/pkg/controller/factory/controller_context.go new file mode 100644 index 0000000000..901fda133e --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/controller/factory/controller_context.go @@ -0,0 +1,109 @@ +package factory +
+import (
+	"fmt"
+	"strings"
+
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/client-go/tools/cache"
+	"k8s.io/client-go/util/workqueue"
+
+	"github.com/openshift/library-go/pkg/operator/events"
+)
+
+// syncContext implements SyncContext and provides user access to the queue and the object that caused
+// the sync to be triggered.
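+// An illustrative construction (names are hypothetical):
+//
+//	ctx := NewSyncContext("ExampleController", recorder)
+//	ctx.Queue().Add(DefaultQueueKey)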
+type syncContext struct {
+	eventRecorder events.Recorder
+	queue         workqueue.RateLimitingInterface
+	queueKey      string
+}
+
+var _ SyncContext = syncContext{}
+
+// NewSyncContext returns a new sync context.
+func NewSyncContext(name string, recorder events.Recorder) SyncContext {
+	return syncContext{
+		queue:         workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), name),
+		eventRecorder: recorder.WithComponentSuffix(strings.ToLower(name)),
+	}
+}
+
+func (c syncContext) Queue() workqueue.RateLimitingInterface {
+	return c.queue
+}
+
+func (c syncContext) QueueKey() string {
+	return c.queueKey
+}
+
+func (c syncContext) Recorder() events.Recorder {
+	return c.eventRecorder
+}
+
+// eventHandler provides the default event handler that is added to the informers passed to the controller factory.
+func (c syncContext) eventHandler(queueKeyFunc ObjectQueueKeyFunc, filter EventFilterFunc) cache.ResourceEventHandler {
+	resourceEventHandler := cache.ResourceEventHandlerFuncs{
+		AddFunc: func(obj interface{}) {
+			runtimeObj, ok := obj.(runtime.Object)
+			if !ok {
+				utilruntime.HandleError(fmt.Errorf("added object %+v is not runtime Object", obj))
+				return
+			}
+			c.Queue().Add(queueKeyFunc(runtimeObj))
+		},
+		UpdateFunc: func(old, new interface{}) {
+			runtimeObj, ok := new.(runtime.Object)
+			if !ok {
+				utilruntime.HandleError(fmt.Errorf("updated object %+v is not runtime Object", new))
+				return
+			}
+			c.Queue().Add(queueKeyFunc(runtimeObj))
+		},
+		DeleteFunc: func(obj interface{}) {
+			runtimeObj, ok := obj.(runtime.Object)
+			if !ok {
+				if tombstone, ok := obj.(cache.DeletedFinalStateUnknown); ok {
+					c.Queue().Add(queueKeyFunc(tombstone.Obj.(runtime.Object)))
+					return
+				}
+				utilruntime.HandleError(fmt.Errorf("deleted object %+v is not runtime Object", obj))
+				return
+			}
+			c.Queue().Add(queueKeyFunc(runtimeObj))
+		},
+	}
+	if filter == nil {
+		return resourceEventHandler
+	}
+	return cache.FilteringResourceEventHandler{
+		FilterFunc: filter,
+		Handler:    resourceEventHandler,
+	}
+}
+
+// namespaceChecker returns a function which reports whether an input object
+// (or its tombstone) is a namespace whose name matches one of the namespaces
+// we are interested in.
+func namespaceChecker(interestingNamespaces []string) func(obj interface{}) bool {
+	interestingNamespacesSet := sets.NewString(interestingNamespaces...)
+
+	return func(obj interface{}) bool {
+		ns, ok := obj.(*corev1.Namespace)
+		if ok {
+			return interestingNamespacesSet.Has(ns.Name)
+		}
+
+		// the object might be getting deleted
+		tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
+		if ok {
+			if ns, ok := tombstone.Obj.(*corev1.Namespace); ok {
+				return interestingNamespacesSet.Has(ns.Name)
+			}
+		}
+		return false
+	}
+} diff --git a/vendor/github.com/openshift/library-go/pkg/controller/factory/eventfilters.go b/vendor/github.com/openshift/library-go/pkg/controller/factory/eventfilters.go new file mode 100644 index 0000000000..b70da95481 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/controller/factory/eventfilters.go @@ -0,0 +1,26 @@ +package factory +
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/util/sets"
+)
+
+func ObjectNameToKey(obj runtime.Object) string {
+	metaObj, ok := obj.(metav1.ObjectMetaAccessor)
+	if !ok {
+		return ""
+	}
+	return metaObj.GetObjectMeta().GetName()
+}
+
+func NamesFilter(names ...string) EventFilterFunc {
+	nameSet := sets.NewString(names...)
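+	// The set is built once up front; the returned closure then does an O(1) lookup per event.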
+	return func(obj interface{}) bool {
+		metaObj, ok := obj.(metav1.ObjectMetaAccessor)
+		if !ok {
+			return false
+		}
+		return nameSet.Has(metaObj.GetObjectMeta().GetName())
+	}
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/controller/factory/factory.go b/vendor/github.com/openshift/library-go/pkg/controller/factory/factory.go
new file mode 100644
index 0000000000..09ce3abdb2
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/controller/factory/factory.go
@@ -0,0 +1,276 @@
+package factory
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/robfig/cron"
+	"k8s.io/apimachinery/pkg/runtime"
+	errorutil "k8s.io/apimachinery/pkg/util/errors"
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/client-go/tools/cache"
+
+	"github.com/openshift/library-go/pkg/operator/events"
+	operatorv1helpers "github.com/openshift/library-go/pkg/operator/v1helpers"
+)
+
+// DefaultQueueKey is the queue key used for string-trigger based controllers.
+const DefaultQueueKey = "key"
+
+// Factory is a generator that produces standard Kubernetes controllers.
+// Factory is really generic and should only be used for simple controllers that do not require special handling.
+type Factory struct {
+	sync                  SyncFunc
+	syncContext           SyncContext
+	syncDegradedClient    operatorv1helpers.OperatorClient
+	resyncInterval        time.Duration
+	resyncSchedules       []string
+	informers             []filteredInformers
+	informerQueueKeys     []informersWithQueueKey
+	bareInformers         []Informer
+	postStartHooks        []PostStartHook
+	namespaceInformers    []*namespaceInformer
+	cachesToSync          []cache.InformerSynced
+	interestingNamespaces sets.String
+}
+
+// Informer represents any structure that allows registering event handlers and reports whether its caches are synced.
+// Any SharedInformer will comply.
+type Informer interface {
+	AddEventHandler(handler cache.ResourceEventHandler)
+	HasSynced() bool
+}
+
+type namespaceInformer struct {
+	informer Informer
+	nsFilter EventFilterFunc
+}
+
+type informersWithQueueKey struct {
+	informers  []Informer
+	filter     EventFilterFunc
+	queueKeyFn ObjectQueueKeyFunc
+}
+
+type filteredInformers struct {
+	informers []Informer
+	filter    EventFilterFunc
+}
+
+// PostStartHook specifies a function that will run after the controller is started.
+// The context is cancelled when the controller is asked to shutdown and the post start hook should terminate as well.
+// The syncContext allows access to the controller queue and event recorder.
+type PostStartHook func(ctx context.Context, syncContext SyncContext) error
+
+// ObjectQueueKeyFunc is used to make a string work queue key out of the runtime object that is passed to it.
+// This can extract the "namespace/name" if you need to, or just return "key" if you are building a controller that only uses string
+// triggers.
+type ObjectQueueKeyFunc func(runtime.Object) string
+
+// EventFilterFunc is used to filter informer events to prevent Sync() from being called
+type EventFilterFunc func(obj interface{}) bool
+
+// New returns a new factory instance.
+func New() *Factory {
+	return &Factory{}
+}
+
+// WithSync is used to set the controller synchronization function. This function is the core of the controller and
+// usually holds the main controller logic.
+func (f *Factory) WithSync(syncFn SyncFunc) *Factory {
+	f.sync = syncFn
+	return f
+}
+
+// WithInformers is used to register event handlers and get the caches synchronized functions.
+// Pass the informers you want to use to react to changes on resources. If an informer event is observed, then the Sync() function
+// is called.
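+//
+// A minimal sketch of the typical chain (configInformer and recorder are
+// assumptions, not part of this package):
+//
+//	controller := factory.New().
+//		WithSync(sync).
+//		WithInformers(configInformer).
+//		ResyncEvery(10*time.Minute).
+//		ToController("ExampleController", recorder)
+//	go controller.Run(ctx, 1)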
+func (f *Factory) WithInformers(informers ...Informer) *Factory { + f.WithFilteredEventsInformers(nil, informers...) + return f +} + +// WithFilteredEventsInformers is used to register event handlers and get the caches synchronized functions. +// Pass the informers you want to use to react to changes on resources. If informer event is observed, then the Sync() function +// is called. +// Pass filter to filter out events that should not trigger Sync() call. +func (f *Factory) WithFilteredEventsInformers(filter EventFilterFunc, informers ...Informer) *Factory { + f.informers = append(f.informers, filteredInformers{ + informers: informers, + filter: filter, + }) + return f +} + +// WithBareInformers allow to register informer that already has custom event handlers registered and no additional +// event handlers will be added to this informer. +// The controller will wait for the cache of this informer to be synced. +// The existing event handlers will have to respect the queue key function or the sync() implementation will have to +// count with custom queue keys. +func (f *Factory) WithBareInformers(informers ...Informer) *Factory { + f.bareInformers = append(f.bareInformers, informers...) + return f +} + +// WithInformersQueueKeyFunc is used to register event handlers and get the caches synchronized functions. +// Pass informers you want to use to react to changes on resources. If informer event is observed, then the Sync() function +// is called. +// Pass the queueKeyFn you want to use to transform the informer runtime.Object into string key used by work queue. +func (f *Factory) WithInformersQueueKeyFunc(queueKeyFn ObjectQueueKeyFunc, informers ...Informer) *Factory { + f.informerQueueKeys = append(f.informerQueueKeys, informersWithQueueKey{ + informers: informers, + queueKeyFn: queueKeyFn, + }) + return f +} + +// WithFilteredEventsInformersQueueKeyFunc is used to register event handlers and get the caches synchronized functions. +// Pass informers you want to use to react to changes on resources. If informer event is observed, then the Sync() function +// is called. +// Pass the queueKeyFn you want to use to transform the informer runtime.Object into string key used by work queue. +// Pass filter to filter out events that should not trigger Sync() call. +func (f *Factory) WithFilteredEventsInformersQueueKeyFunc(queueKeyFn ObjectQueueKeyFunc, filter EventFilterFunc, informers ...Informer) *Factory { + f.informerQueueKeys = append(f.informerQueueKeys, informersWithQueueKey{ + informers: informers, + filter: filter, + queueKeyFn: queueKeyFn, + }) + return f +} + +// WithPostStartHooks allows to register functions that will run asynchronously after the controller is started via Run command. +func (f *Factory) WithPostStartHooks(hooks ...PostStartHook) *Factory { + f.postStartHooks = append(f.postStartHooks, hooks...) + return f +} + +// WithNamespaceInformer is used to register event handlers and get the caches synchronized functions. +// The sync function will only trigger when the object observed by this informer is a namespace and its name matches the interestingNamespaces. +// Do not use this to register non-namespace informers. 
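+//
+// A hedged sketch (namespacesInformer is assumed to be a SharedInformer over
+// corev1 Namespace objects):
+//
+//	factory.New().
+//		WithSync(sync).
+//		WithNamespaceInformer(namespacesInformer, "openshift-config").
+//		ToController("Example", recorder)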
+func (f *Factory) WithNamespaceInformer(informer Informer, interestingNamespaces ...string) *Factory {
+	f.namespaceInformers = append(f.namespaceInformers, &namespaceInformer{
+		informer: informer,
+		nsFilter: namespaceChecker(interestingNamespaces),
+	})
+	return f
+}
+
+// ResyncEvery will cause the Sync() function to be called periodically, regardless of informers.
+// This is useful when you want to refresh every N minutes or you fear that your informers can get stuck.
+// If this is not called, no periodic resync will happen.
+// Note: The controller context passed to the Sync() function in this case does not contain the object metadata or the object itself.
+// This can be used to detect periodic resyncs, but a normal Sync() has to be cautious about `nil` objects.
+func (f *Factory) ResyncEvery(interval time.Duration) *Factory {
+	f.resyncInterval = interval
+	return f
+}
+
+// ResyncSchedule allows supplying a cron-syntax schedule that will be used to schedule the sync() call runs.
+// This allows more fine-tuned controller scheduling than ResyncEvery.
+// Examples:
+//
+// factory.New().ResyncSchedule("@every 1s").ToController() // Every second
+// factory.New().ResyncSchedule("@hourly").ToController()   // Every hour
+// factory.New().ResyncSchedule("30 * * * *").ToController() // Every hour on the half hour
+//
+// Note: The controller context passed to the Sync() function in this case does not contain the object metadata or the object itself.
+// This can be used to detect periodic resyncs, but a normal Sync() has to be cautious about `nil` objects.
+func (f *Factory) ResyncSchedule(schedules ...string) *Factory {
+	f.resyncSchedules = append(f.resyncSchedules, schedules...)
+	return f
+}
+
+// WithSyncContext allows specifying a custom, existing sync context for this factory.
+// This is useful during unit testing where you can override the default event recorder or mock the runtime objects.
+// If this function is not called, a SyncContext is created by the factory automatically.
+func (f *Factory) WithSyncContext(ctx SyncContext) *Factory {
+	f.syncContext = ctx
+	return f
+}
+
+// WithSyncDegradedOnError wraps the controller sync() function, so that when it returns an error, the operator client
+// is used to set a degraded condition (eg. "ControllerFooDegraded"). The degraded condition name is derived from the controller name.
+func (f *Factory) WithSyncDegradedOnError(operatorClient operatorv1helpers.OperatorClient) *Factory {
+	f.syncDegradedClient = operatorClient
+	return f
+}
+
+// ToController produces a runnable Controller.
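+//
+// Sketch of producing a controller that reports an "ExampleDegraded" condition
+// on sync errors (operatorClient, operatorInformer and recorder are assumptions):
+//
+//	c := factory.New().
+//		WithSync(sync).
+//		WithInformers(operatorInformer).
+//		WithSyncDegradedOnError(operatorClient).
+//		ToController("Example", recorder)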
+func (f *Factory) ToController(name string, eventRecorder events.Recorder) Controller { + if f.sync == nil { + panic(fmt.Errorf("WithSync() must be used before calling ToController() in %q", name)) + } + + var ctx SyncContext + if f.syncContext != nil { + ctx = f.syncContext + } else { + ctx = NewSyncContext(name, eventRecorder) + } + + var cronSchedules []cron.Schedule + if len(f.resyncSchedules) > 0 { + var errors []error + for _, schedule := range f.resyncSchedules { + if s, err := cron.ParseStandard(schedule); err != nil { + errors = append(errors, err) + } else { + cronSchedules = append(cronSchedules, s) + } + } + if err := errorutil.NewAggregate(errors); err != nil { + panic(fmt.Errorf("failed to parse controller schedules for %q: %v", name, err)) + } + } + + c := &baseController{ + name: name, + syncDegradedClient: f.syncDegradedClient, + sync: f.sync, + resyncEvery: f.resyncInterval, + resyncSchedules: cronSchedules, + cachesToSync: append([]cache.InformerSynced{}, f.cachesToSync...), + syncContext: ctx, + postStartHooks: f.postStartHooks, + cacheSyncTimeout: defaultCacheSyncTimeout, + } + + // Warn about too fast resyncs as they might drain the operators QPS. + // This event is cheap as it is only emitted on operator startup. + if c.resyncEvery.Seconds() < 60 { + ctx.Recorder().Warningf("FastControllerResync", "Controller %q resync interval is set to %s which might lead to client request throttling", name, c.resyncEvery) + } + + for i := range f.informerQueueKeys { + for d := range f.informerQueueKeys[i].informers { + informer := f.informerQueueKeys[i].informers[d] + queueKeyFn := f.informerQueueKeys[i].queueKeyFn + informer.AddEventHandler(c.syncContext.(syncContext).eventHandler(queueKeyFn, f.informerQueueKeys[i].filter)) + c.cachesToSync = append(c.cachesToSync, informer.HasSynced) + } + } + + for i := range f.informers { + for d := range f.informers[i].informers { + informer := f.informers[i].informers[d] + informer.AddEventHandler(c.syncContext.(syncContext).eventHandler(func(runtime.Object) string { + return DefaultQueueKey + }, f.informers[i].filter)) + c.cachesToSync = append(c.cachesToSync, informer.HasSynced) + } + } + + for i := range f.bareInformers { + c.cachesToSync = append(c.cachesToSync, f.bareInformers[i].HasSynced) + } + + for i := range f.namespaceInformers { + f.namespaceInformers[i].informer.AddEventHandler(c.syncContext.(syncContext).eventHandler(func(runtime.Object) string { + return DefaultQueueKey + }, f.namespaceInformers[i].nsFilter)) + c.cachesToSync = append(c.cachesToSync, f.namespaceInformers[i].informer.HasSynced) + } + + return c +} diff --git a/vendor/github.com/openshift/library-go/pkg/controller/factory/interfaces.go b/vendor/github.com/openshift/library-go/pkg/controller/factory/interfaces.go new file mode 100644 index 0000000000..0ef98c6701 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/controller/factory/interfaces.go @@ -0,0 +1,47 @@ +package factory + +import ( + "context" + + "k8s.io/client-go/util/workqueue" + + "github.com/openshift/library-go/pkg/operator/events" +) + +// Controller interface represents a runnable Kubernetes controller. +// Cancelling the syncContext passed will cause the controller to shutdown. +// Number of workers determine how much parallel the job processing should be. +type Controller interface { + // Run runs the controller and blocks until the controller is finished. + // Number of workers can be specified via workers parameter. 
+	// This function will return when all internal loops are finished.
+	// Note that having more than one worker usually means handling parallel invocations of Sync().
+	Run(ctx context.Context, workers int)
+
+	// Sync contains the main controller logic.
+	// This should not be called directly, but can be used in unit tests to exercise the sync.
+	Sync(ctx context.Context, controllerContext SyncContext) error
+
+	// Name returns the controller name string.
+	Name() string
+}
+
+// SyncContext interface represents a context given to the Sync() function where the main controller logic happens.
+// SyncContext exposes the controller name and gives the user access to the queue (for manual requeue).
+// SyncContext also provides metadata about the object that informers observed as changed.
+type SyncContext interface {
+	// Queue gives access to the controller queue. This can be used for manual requeue, although if a Sync() function returns
+	// an error, the object is automatically re-queued. Use with caution.
+	Queue() workqueue.RateLimitingInterface
+
+	// QueueKey represents the queue key passed to the Sync function.
+	QueueKey() string
+
+	// Recorder provides access to the event recorder.
+	Recorder() events.Recorder
+}
+
+// SyncFunc is a function that contains the main controller logic.
+// The ctx passed is the main controller context; when it is cancelled, the controller is being shut down.
+// The syncContext provides access to the controller name, queue and event recorder.
+type SyncFunc func(ctx context.Context, controllerContext SyncContext) error
diff --git a/vendor/github.com/openshift/library-go/pkg/controller/fileobserver/OWNERS b/vendor/github.com/openshift/library-go/pkg/controller/fileobserver/OWNERS
new file mode 100644
index 0000000000..bf630bd071
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/controller/fileobserver/OWNERS
@@ -0,0 +1,6 @@
+reviewers:
+  - deads2k
+  - sttts
+  - mfojtik
+approvers:
+  - mfojtik
diff --git a/vendor/github.com/openshift/library-go/pkg/controller/fileobserver/observer.go b/vendor/github.com/openshift/library-go/pkg/controller/fileobserver/observer.go
new file mode 100644
index 0000000000..52b617e059
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/controller/fileobserver/observer.go
@@ -0,0 +1,79 @@
+package fileobserver
+
+import (
+	"fmt"
+	"os"
+	"time"
+
+	"k8s.io/klog/v2"
+)
+
+type Observer interface {
+	Run(stopChan <-chan struct{})
+	HasSynced() bool
+	AddReactor(reaction ReactorFn, startingFileContent map[string][]byte, files ...string) Observer
+}
+
+// ActionType defines the type of action observed on a file.
+type ActionType int
+
+const (
+	// FileModified means the file content was modified.
+	FileModified ActionType = iota
+
+	// FileCreated means the file was just created.
+	FileCreated
+
+	// FileDeleted means the file was deleted.
+	FileDeleted
+)
+
+func (t ActionType) name() string {
+	switch t {
+	case FileCreated:
+		return "create"
+	case FileDeleted:
+		return "delete"
+	case FileModified:
+		return "modified"
+	default:
+		return "unknown"
+	}
+}
+
+// String returns a human-readable form of the action taken on a file.
+func (t ActionType) String(filename string) string {
+	switch t {
+	case FileCreated:
+		return fmt.Sprintf("file %s was created", filename)
+	case FileDeleted:
+		return fmt.Sprintf("file %s was deleted", filename)
+	case FileModified:
+		return fmt.Sprintf("file %s was modified", filename)
+	}
+	return ""
+}
+
+// ReactorFn defines a reaction function invoked when an observed file is modified.
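+//
+// A sketch of a trivial reactor (logReactor is an assumption, not part of
+// this package):
+//
+//	func logReactor(file string, action fileobserver.ActionType) error {
+//		klog.Infof("observed: %s", action.String(file))
+//		return nil
+//	}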
+type ReactorFn func(file string, action ActionType) error + +// ExitOnChangeReactor provides reactor function that causes the process to exit when the change is detected. +// DEPRECATED: Using this function cause process to exit immediately without proper shutdown (context close/etc.) +// Use the TerminateOnChangeReactor() instead. +var ExitOnChangeReactor = TerminateOnChangeReactor(func() { os.Exit(0) }) + +func TerminateOnChangeReactor(terminateFn func()) ReactorFn { + return func(filename string, action ActionType) error { + klog.Infof("Triggering shutdown because %s", action.String(filename)) + terminateFn() + return nil + } +} + +func NewObserver(interval time.Duration) (Observer, error) { + return &pollingObserver{ + interval: interval, + reactors: map[string][]ReactorFn{}, + files: map[string]fileHashAndState{}, + }, nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/controller/fileobserver/observer_polling.go b/vendor/github.com/openshift/library-go/pkg/controller/fileobserver/observer_polling.go new file mode 100644 index 0000000000..13f838b417 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/controller/fileobserver/observer_polling.go @@ -0,0 +1,223 @@ +package fileobserver + +import ( + "bytes" + "crypto/sha256" + "encoding/hex" + "fmt" + "io" + "os" + "sync" + "time" + + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/component-base/metrics" + "k8s.io/component-base/metrics/legacyregistry" + "k8s.io/klog/v2" +) + +type pollingObserver struct { + interval time.Duration + reactors map[string][]ReactorFn + files map[string]fileHashAndState + + reactorsMutex sync.RWMutex + + syncedMutex sync.RWMutex + hasSynced bool +} + +// HasSynced indicates that the observer synced all observed files at least once. +func (o *pollingObserver) HasSynced() bool { + o.syncedMutex.RLock() + defer o.syncedMutex.RUnlock() + return o.hasSynced +} + +// AddReactor will add new reactor to this observer. +func (o *pollingObserver) AddReactor(reaction ReactorFn, startingFileContent map[string][]byte, files ...string) Observer { + o.reactorsMutex.Lock() + defer o.reactorsMutex.Unlock() + for _, f := range files { + if len(f) == 0 { + panic(fmt.Sprintf("observed file name must not be empty (%#v)", files)) + } + // Do not rehash existing files + if _, exists := o.files[f]; exists { + continue + } + var err error + + if startingContent, ok := startingFileContent[f]; ok { + klog.V(3).Infof("Starting from specified content for file %q", f) + // if empty starting content is specified, do not hash the empty string but just return it the same + // way as calculateFileHash() does in that case. + // in case the file exists and is empty, we don't care about the initial content anyway, because we + // are only going to react when the file content change. + // in case the file does not exists but empty string is specified as initial content, without this + // the content will be hashed and reaction will trigger as if the content changed. 
+ if len(startingContent) == 0 { + var fileExists bool + if fileExists, err = isFile(f); err != nil { + panic(fmt.Sprintf("unexpected error while adding reactor for %#v: %v", files, err)) + } + o.files[f] = fileHashAndState{exists: fileExists} + o.reactors[f] = append(o.reactors[f], reaction) + continue + } + currentHash, emptyFile, err := calculateHash(bytes.NewBuffer(startingContent)) + if err != nil { + panic(fmt.Sprintf("unexpected error while adding reactor for %#v: %v", files, err)) + } + o.files[f] = fileHashAndState{exists: true, hash: currentHash, isEmpty: emptyFile} + } else { + klog.V(3).Infof("Adding reactor for file %q", f) + o.files[f], err = calculateFileHash(f) + if err != nil && !os.IsNotExist(err) { + panic(fmt.Sprintf("unexpected error while adding reactor for %#v: %v", files, err)) + } + } + o.reactors[f] = append(o.reactors[f], reaction) + } + return o +} + +func (o *pollingObserver) processReactors(stopCh <-chan struct{}) { + err := wait.PollImmediateInfinite(o.interval, func() (bool, error) { + select { + case <-stopCh: + return true, nil + default: + } + o.reactorsMutex.RLock() + defer o.reactorsMutex.RUnlock() + for filename, reactors := range o.reactors { + currentFileState, err := calculateFileHash(filename) + if err != nil && !os.IsNotExist(err) { + return false, err + } + + lastKnownFileState := o.files[filename] + o.files[filename] = currentFileState + + for i := range reactors { + var action ActionType + switch { + case !lastKnownFileState.exists && !currentFileState.exists: + // skip non-existing file + continue + case !lastKnownFileState.exists && currentFileState.exists && (len(currentFileState.hash) > 0 || currentFileState.isEmpty): + // if we see a new file created that has content or its empty, trigger FileCreate action + klog.Infof("Observed file %q has been created (hash=%q)", filename, currentFileState.hash) + action = FileCreated + case lastKnownFileState.exists && !currentFileState.exists: + klog.Infof("Observed file %q has been deleted", filename) + action = FileDeleted + case lastKnownFileState.hash == currentFileState.hash: + // skip if the hashes are the same + continue + case lastKnownFileState.hash != currentFileState.hash: + klog.Infof("Observed file %q has been modified (old=%q, new=%q)", filename, lastKnownFileState.hash, currentFileState.hash) + action = FileModified + } + // increment metrics counter for this file + observerActionsMetrics.WithLabelValues(filename, action.name()).Inc() + // execute the register reactor + if err := reactors[i](filename, action); err != nil { + klog.Errorf("Reactor for %q failed: %v", filename, err) + } + } + } + if !o.HasSynced() { + o.syncedMutex.Lock() + o.hasSynced = true + o.syncedMutex.Unlock() + klog.V(3).Info("File observer successfully synced") + } + return false, nil + }) + if err != nil { + klog.Fatalf("file observer failed: %v", err) + } +} + +var observerActionsMetrics = metrics.NewCounterVec(&metrics.CounterOpts{ + Subsystem: "fileobserver", + Name: "action_count", + Help: "Counter for every observed action for all monitored files", + StabilityLevel: metrics.ALPHA, +}, []string{"name", "filename"}) + +func init() { + (&sync.Once{}).Do(func() { + legacyregistry.MustRegister(observerActionsMetrics) + }) +} + +// Run will start a new observer. 
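+//
+// A minimal usage sketch (cancel and stopCh are assumptions wired to process
+// shutdown):
+//
+//	obs, err := fileobserver.NewObserver(5 * time.Second)
+//	if err != nil {
+//		klog.Fatal(err)
+//	}
+//	obs.AddReactor(fileobserver.TerminateOnChangeReactor(cancel), nil, "/var/serving-cert/tls.crt")
+//	go obs.Run(stopCh)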
+func (o *pollingObserver) Run(stopChan <-chan struct{}) { + klog.Info("Starting file observer") + defer klog.Infof("Shutting down file observer") + o.processReactors(stopChan) +} + +type fileHashAndState struct { + hash string + exists bool + isEmpty bool +} + +func calculateFileHash(path string) (fileHashAndState, error) { + result := fileHashAndState{} + if exists, err := isFile(path); !exists || err != nil { + return result, err + } + + f, err := os.Open(path) + if err != nil { + return result, err + } + defer f.Close() + // at this point we know for sure the file exists and we can read its content even if that content is empty + result.exists = true + + hash, empty, err := calculateHash(f) + if err != nil { + return result, err + } + + result.hash = hash + result.isEmpty = empty + + return result, nil +} + +func calculateHash(content io.Reader) (string, bool, error) { + hasher := sha256.New() + written, err := io.Copy(hasher, content) + if err != nil { + return "", false, err + } + // written == 0 means the content is empty + if written == 0 { + return "", true, nil + } + return hex.EncodeToString(hasher.Sum(nil)), false, nil +} + +func isFile(path string) (bool, error) { + stat, err := os.Stat(path) + if err != nil { + if os.IsNotExist(err) { + return false, nil + } + return false, err + } + + // this is fatal + if stat.IsDir() { + return false, fmt.Errorf("%s is a directory", path) + } + + return true, nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/controller/metrics/client_metrics.go b/vendor/github.com/openshift/library-go/pkg/controller/metrics/client_metrics.go new file mode 100644 index 0000000000..c36e2d3c86 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/controller/metrics/client_metrics.go @@ -0,0 +1,97 @@ +package metrics + +import ( + "net/url" + "time" + + "github.com/blang/semver" + k8smetrics "k8s.io/component-base/metrics" + "k8s.io/component-base/metrics/legacyregistry" + + "github.com/prometheus/client_golang/prometheus" +) + +var ( + // requestLatency is a Prometheus Summary metric type partitioned by + // "verb" and "url" labels. It is used for the rest client latency metrics. + requestLatency = k8smetrics.NewHistogramVec( + &k8smetrics.HistogramOpts{ + Name: "rest_client_request_latency_seconds", + Help: "Request latency in seconds. 
Broken down by verb and URL.", + Buckets: prometheus.ExponentialBuckets(0.001, 2, 10), + }, + []string{"verb", "url"}, + ) + + requestResult = k8smetrics.NewCounterVec( + &k8smetrics.CounterOpts{ + Name: "rest_client_requests_total", + Help: "Number of HTTP requests, partitioned by status code, method, and host.", + }, + []string{"code", "method", "host"}, + ) +) + +func init() { + legacyregistry.MustRegister(requestLatency) + legacyregistry.MustRegister(requestResult) + + legacyregistry.Register(&latencyAdapter{requestLatency}) + legacyregistry.Register(&resultAdapter{requestResult}) +} + +type latencyAdapter struct { + m *k8smetrics.HistogramVec +} + +func (l *latencyAdapter) Describe(c chan<- *prometheus.Desc) { + l.m.Describe(c) +} + +func (l *latencyAdapter) Collect(c chan<- prometheus.Metric) { + l.m.Collect(c) +} + +func (l *latencyAdapter) Create(version *semver.Version) bool { + return l.m.Create(version) +} + +func (l *latencyAdapter) Observe(verb string, u url.URL, latency time.Duration) { + l.m.WithLabelValues(verb, u.String()).Observe(latency.Seconds()) +} + +func (l *latencyAdapter) ClearState() { + l.m.Reset() +} + +func (l *latencyAdapter) FQName() string { + return l.m.FQName() +} + +type resultAdapter struct { + m *k8smetrics.CounterVec +} + +func (r *resultAdapter) Describe(c chan<- *prometheus.Desc) { + r.m.Describe(c) +} + +func (r *resultAdapter) Collect(c chan<- prometheus.Metric) { + r.m.Collect(c) +} + +func (r *resultAdapter) Create(version *semver.Version) bool { + return r.m.Create(version) +} + +func (r *resultAdapter) Increment(code, method, host string) { + r.m.WithLabelValues(code, method, host).Inc() +} + +func (r *resultAdapter) ClearState() { + r.m.Reset() +} + +func (r *resultAdapter) FQName() string { + return r.m.FQName() +} diff --git a/vendor/github.com/openshift/library-go/pkg/controller/metrics/workqueue_metrics.go b/vendor/github.com/openshift/library-go/pkg/controller/metrics/workqueue_metrics.go new file mode 100644 index 0000000000..47002e74d9 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/controller/metrics/workqueue_metrics.go @@ -0,0 +1,210 @@ +package metrics + +import ( + "k8s.io/client-go/util/workqueue" + k8smetrics "k8s.io/component-base/metrics" + "k8s.io/component-base/metrics/legacyregistry" + "k8s.io/klog/v2" + + "github.com/prometheus/client_golang/prometheus" +) + +// Package prometheus sets the workqueue DefaultMetricsFactory to produce +// prometheus metrics. To use this package, you just have to import it. + +func init() { + workqueue.SetProvider(prometheusMetricsProvider{}) +} + +// Package prometheus sets the workqueue DefaultMetricsFactory to produce +// prometheus metrics. To use this package, you just have to import it. + +// Metrics subsystem and keys used by the workqueue. 
+const ( + WorkQueueSubsystem = "workqueue" + DepthKey = "depth" + AddsKey = "adds_total" + QueueLatencyKey = "queue_duration_seconds" + WorkDurationKey = "work_duration_seconds" + UnfinishedWorkKey = "unfinished_work_seconds" + LongestRunningProcessorKey = "longest_running_processor_seconds" + RetriesKey = "retries_total" +) + +func init() { + workqueue.SetProvider(prometheusMetricsProvider{}) +} + +type prometheusMetricsProvider struct{} + +func (prometheusMetricsProvider) NewDepthMetric(name string) workqueue.GaugeMetric { + depth := k8smetrics.NewGauge(&k8smetrics.GaugeOpts{ + Subsystem: WorkQueueSubsystem, + Name: DepthKey, + Help: "Current depth of workqueue", + ConstLabels: prometheus.Labels{"name": name}, + }) + legacyregistry.Register(depth) + return depth +} + +func (prometheusMetricsProvider) NewAddsMetric(name string) workqueue.CounterMetric { + adds := k8smetrics.NewCounter(&k8smetrics.CounterOpts{ + Subsystem: WorkQueueSubsystem, + Name: AddsKey, + Help: "Total number of adds handled by workqueue", + ConstLabels: prometheus.Labels{"name": name}, + }) + legacyregistry.Register(adds) + return adds +} + +func (prometheusMetricsProvider) NewLatencyMetric(name string) workqueue.HistogramMetric { + latency := k8smetrics.NewHistogram(&k8smetrics.HistogramOpts{ + Subsystem: WorkQueueSubsystem, + Name: QueueLatencyKey, + Help: "How long in seconds an item stays in workqueue before being requested.", + ConstLabels: prometheus.Labels{"name": name}, + Buckets: prometheus.ExponentialBuckets(10e-9, 10, 10), + }) + legacyregistry.Register(latency) + return latency +} + +func (prometheusMetricsProvider) NewWorkDurationMetric(name string) workqueue.HistogramMetric { + workDuration := k8smetrics.NewHistogram(&k8smetrics.HistogramOpts{ + Subsystem: WorkQueueSubsystem, + Name: WorkDurationKey, + Help: "How long in seconds processing an item from workqueue takes.", + ConstLabels: prometheus.Labels{"name": name}, + Buckets: prometheus.ExponentialBuckets(10e-9, 10, 10), + }) + legacyregistry.Register(workDuration) + return workDuration +} + +func (prometheusMetricsProvider) NewUnfinishedWorkSecondsMetric(name string) workqueue.SettableGaugeMetric { + unfinished := k8smetrics.NewGauge(&k8smetrics.GaugeOpts{ + Subsystem: WorkQueueSubsystem, + Name: UnfinishedWorkKey, + Help: "How many seconds of work has done that " + + "is in progress and hasn't been observed by work_duration. Large " + + "values indicate stuck threads. 
One can deduce the number of stuck " + + "threads by observing the rate at which this increases.", + ConstLabels: prometheus.Labels{"name": name}, + }) + legacyregistry.Register(unfinished) + return unfinished +} + +func (prometheusMetricsProvider) NewLongestRunningProcessorSecondsMetric(name string) workqueue.SettableGaugeMetric { + unfinished := k8smetrics.NewGauge(&k8smetrics.GaugeOpts{ + Subsystem: WorkQueueSubsystem, + Name: LongestRunningProcessorKey, + Help: "How many seconds has the longest running " + + "processor for workqueue been running.", + ConstLabels: prometheus.Labels{"name": name}, + }) + legacyregistry.Register(unfinished) + return unfinished +} + +func (prometheusMetricsProvider) NewRetriesMetric(name string) workqueue.CounterMetric { + retries := k8smetrics.NewCounter(&k8smetrics.CounterOpts{ + Subsystem: WorkQueueSubsystem, + Name: RetriesKey, + Help: "Total number of retries handled by workqueue", + ConstLabels: prometheus.Labels{"name": name}, + }) + legacyregistry.Register(retries) + return retries +} + +// TODO(danielqsj): Remove the following metrics, they are deprecated +func (prometheusMetricsProvider) NewDeprecatedDepthMetric(name string) workqueue.GaugeMetric { + depth := k8smetrics.NewGauge(&k8smetrics.GaugeOpts{ + Subsystem: name, + Name: "depth", + Help: "(Deprecated) Current depth of workqueue: " + name, + }) + if err := legacyregistry.Register(depth); err != nil { + klog.Errorf("failed to register depth metric %v: %v", name, err) + } + return depth +} + +func (prometheusMetricsProvider) NewDeprecatedAddsMetric(name string) workqueue.CounterMetric { + adds := k8smetrics.NewCounter(&k8smetrics.CounterOpts{ + Subsystem: name, + Name: "adds", + Help: "(Deprecated) Total number of adds handled by workqueue: " + name, + }) + if err := legacyregistry.Register(adds); err != nil { + klog.Errorf("failed to register adds metric %v: %v", name, err) + } + return adds +} + +func (prometheusMetricsProvider) NewDeprecatedLatencyMetric(name string) workqueue.SummaryMetric { + latency := k8smetrics.NewSummary(&k8smetrics.SummaryOpts{ + Subsystem: name, + Name: "queue_latency", + Help: "(Deprecated) How long an item stays in workqueue" + name + " before being requested.", + }) + if err := legacyregistry.Register(latency); err != nil { + klog.Errorf("failed to register latency metric %v: %v", name, err) + } + return latency +} + +func (prometheusMetricsProvider) NewDeprecatedWorkDurationMetric(name string) workqueue.SummaryMetric { + workDuration := k8smetrics.NewSummary(&k8smetrics.SummaryOpts{ + Subsystem: name, + Name: "work_duration", + Help: "(Deprecated) How long processing an item from workqueue" + name + " takes.", + }) + if err := legacyregistry.Register(workDuration); err != nil { + klog.Errorf("failed to register work_duration metric %v: %v", name, err) + } + return workDuration +} + +func (prometheusMetricsProvider) NewDeprecatedUnfinishedWorkSecondsMetric(name string) workqueue.SettableGaugeMetric { + unfinished := k8smetrics.NewGauge(&k8smetrics.GaugeOpts{ + Subsystem: name, + Name: "unfinished_work_seconds", + Help: "(Deprecated) How many seconds of work " + name + " has done that " + + "is in progress and hasn't been observed by work_duration. Large " + + "values indicate stuck threads. 
One can deduce the number of stuck " + + "threads by observing the rate at which this increases.", + }) + if err := legacyregistry.Register(unfinished); err != nil { + klog.Errorf("failed to register unfinished_work_seconds metric %v: %v", name, err) + } + return unfinished +} + +func (prometheusMetricsProvider) NewDeprecatedLongestRunningProcessorMicrosecondsMetric(name string) workqueue.SettableGaugeMetric { + unfinished := k8smetrics.NewGauge(&k8smetrics.GaugeOpts{ + Subsystem: name, + Name: "longest_running_processor_microseconds", + Help: "(Deprecated) How many microseconds has the longest running " + + "processor for " + name + " been running.", + }) + if err := legacyregistry.Register(unfinished); err != nil { + klog.Errorf("failed to register longest_running_processor_microseconds metric %v: %v", name, err) + } + return unfinished +} + +func (prometheusMetricsProvider) NewDeprecatedRetriesMetric(name string) workqueue.CounterMetric { + retries := k8smetrics.NewCounter(&k8smetrics.CounterOpts{ + Subsystem: name, + Name: "retries", + Help: "(Deprecated) Total number of retries handled by workqueue: " + name, + }) + if err := legacyregistry.Register(retries); err != nil { + klog.Errorf("failed to register retries metric %v: %v", name, err) + } + return retries +} diff --git a/vendor/github.com/openshift/library-go/pkg/image/imageutil/helpers.go b/vendor/github.com/openshift/library-go/pkg/image/imageutil/helpers.go index d35c052f37..24b8533f13 100644 --- a/vendor/github.com/openshift/library-go/pkg/image/imageutil/helpers.go +++ b/vendor/github.com/openshift/library-go/pkg/image/imageutil/helpers.go @@ -222,6 +222,16 @@ func PrioritizeTags(tags []string) { } } +// SpecHasTag returns named tag from image stream's spec and boolean whether one was found. +func SpecHasTag(stream *imagev1.ImageStream, name string) (imagev1.TagReference, bool) { + for _, tag := range stream.Spec.Tags { + if tag.Name == name { + return tag, true + } + } + return imagev1.TagReference{}, false +} + // StatusHasTag returns named tag from image stream's status and boolean whether one was found. func StatusHasTag(stream *imagev1.ImageStream, name string) (imagev1.NamedTagEventList, bool) { for _, tag := range stream.Status.Tags { @@ -286,10 +296,149 @@ func ImageWithMetadataOrDie(image *imagev1.Image) { } } +// TagReferencesLocalTag returns true if the provided tag reference references another image stream tag +// in the current image stream. This is only true when from points to an ImageStreamTag without a colon +// or from.name is :. +func TagReferencesLocalTag(stream *imagev1.ImageStream, tag imagev1.TagReference) (string, bool) { + if tag.From == nil || tag.From.Kind != "ImageStreamTag" { + return "", false + } + if len(tag.From.Namespace) > 0 && tag.From.Namespace != stream.Namespace { + return "", false + } + ref := strings.TrimPrefix(tag.From.Name, stream.Name+":") + if strings.Contains(ref, ":") { + return "", false + } + return ref, true +} + +var ( + // ErrNoStreamRepository is returned if the status dockerImageRepository field was unset but the + // method required that value to create a pull spec. + ErrNoStreamRepository = fmt.Errorf("no image repository has been set on the image stream status") + // ErrWaitForPullSpec is returned when a pull spec cannot be inferred from the image stream automatically + // and the user requires a valid image tag. 
+	ErrWaitForPullSpec = fmt.Errorf("the pull spec cannot be determined yet")
+)
+
+// ResolveNewestPullSpecForTag returns the most recent available pull spec for the given tag, even
+// if importing that pull spec is still in progress or has failed. Use this method when the current
+// state of the tag as the user sees it is important because you don't want to silently ignore a
+// newer tag request that hasn't yet been imported. Note that if no image has been tagged or pushed,
+// pullSpec will still be returned pointing to the pull spec for the tag within the image repository
+// (<repository>:<tag> unless defaultExternal is set) and isTagEmpty will be true.
+// hasStatus is true if the returned pull spec points to an imported / pushed image, or false if
+// a spec tag has not been specified, the spec tag hasn't been imported, or the import has failed.
+// An error is returned only if isTagEmpty is true and status.dockerImageRepository is unset because
+// the administrator has not installed a registry server.
+//
+// Use this method when you need the user intent pull spec and you do not want to tolerate a slightly
+// older image (tooling that needs to error if the user's intent in tagging isn't realized).
+func ResolveNewestPullSpecForTag(stream *imagev1.ImageStream, tag string, defaultExternal bool) (pullSpec string, hasStatus, isTagEmpty bool, err error) {
+	pullSpec, _, hasStatus, isTagEmpty, err = resolvePullSpecForTag(stream, tag, defaultExternal, true)
+	return pullSpec, hasStatus, isTagEmpty, err
+}
+
+// ResolveRecentPullSpecForTag returns the most recent successfully imported pull spec for the
+// given tag, i.e. "last-known-good". Use this method when you can tolerate some lag in picking up
+// the newest version. This method is roughly equivalent to the behavior of pulling the image from
+// the internal registry. If no image has been tagged or pushed, pullSpec will still be returned
+// pointing to the pull spec for the tag within the image repository
+// (<repository>:<tag> unless defaultExternal is set) and isTagEmpty will be true.
+// hasNewer is true if the pull spec does not represent the newest user input, or false if the
+// current user spec tag has been imported successfully. hasStatus is true if the returned pull
+// spec points to an imported / pushed image, or false if a spec tag has not been specified, the
+// spec tag hasn't been imported, or the import has failed. An error is returned only if isTagEmpty
+// is true and status.dockerImageRepository is unset because the administrator has not installed a
+// registry server.
+//
+// This method is typically used by consumers that need the value at the tag and prefer to have a
+// slightly older image over not getting any image at all (or if the image can't be imported
+// due to temporary network or controller issues).
+func ResolveRecentPullSpecForTag(stream *imagev1.ImageStream, tag string, defaultExternal bool) (pullSpec string, hasNewer, hasStatus, isTagEmpty bool, err error) {
+	pullSpec, hasNewer, hasStatus, isTagEmpty, err = resolvePullSpecForTag(stream, tag, defaultExternal, false)
+	return pullSpec, hasNewer, hasStatus, isTagEmpty, err
+}
+
+// resolvePullSpecForTag handles finding the most accurate pull spec depending on whether the user
+// requires the latest or simply wants the most recent imported version (ignores pending imports).
+// If a pull spec cannot be inferred, an error is returned. Otherwise the following status values are
+// returned:
+//
+// * hasNewer - a newer version of this tag is being imported but is not ready
+// * hasStatus - this pull spec points to the latest image in the status (has been imported / pushed)
+// * isTagEmpty - no pull spec or push has occurred to this tag, but it's still possible to get a pull spec
+//
+// defaultExternal is considered when isTagEmpty is true (no user input provided) and calculates the pull
+// spec from the external repository base (status.publicDockerImageRepository) if it is set.
+func resolvePullSpecForTag(stream *imagev1.ImageStream, tag string, defaultExternal, requireLatest bool) (pullSpec string, hasNewer, hasStatus, isTagEmpty bool, err error) {
+	if len(tag) == 0 {
+		tag = imagev1.DefaultImageTag
+	}
+	status, _ := StatusHasTag(stream, tag)
+	spec, hasSpec := SpecHasTag(stream, tag)
+	hasSpecTagRef := hasSpec && spec.From != nil && spec.From.Kind == "DockerImage" && spec.ReferencePolicy.Type == imagev1.SourceTagReferencePolicy
+
+	var event *imagev1.TagEvent
+	switch {
+	case len(status.Items) == 0:
+		// nothing in status:
+		// - waiting for import of first image (generation of spec > status)
+		// - spec is empty
+		// - spec is a ref tag to something else that hasn't been imported yet
+		// - spec is a ref tag to another spec tag on this same image stream that doesn't exist
+
+	case hasSpec && spec.Generation != nil && *spec.Generation > status.Items[0].Generation:
+		// waiting for import because spec generation is newer and had a previous image
+		if requireLatest {
+			// note: if spec tag doesn't have a DockerImage kind, we'll have to wait for whatever
+			// logic is necessary for import to run (this could happen if a new Kind is introduced)
+			if !hasSpecTagRef {
+				return "", hasNewer, false, false, ErrWaitForPullSpec
+			}
+		} else {
+			event = &status.Items[0]
+			hasNewer = true
+		}
+	default:
+		// this is the latest version of the image
+		event = &status.Items[0]
+	}
+
+	switch {
+	case event != nil:
+		hasStatus = true
+		pullSpec = resolveReferenceForTagEvent(stream, spec, event)
+	case hasSpecTagRef:
+		// if the user explicitly provided a spec tag, we can use it
+		pullSpec = resolveReferenceForTagEvent(stream, spec, &imagev1.TagEvent{
+			DockerImageReference: spec.From.Name,
+		})
+	default:
+		isTagEmpty = true
+		repositorySpec := stream.Status.DockerImageRepository
+		if defaultExternal && len(stream.Status.PublicDockerImageRepository) > 0 {
+			repositorySpec = stream.Status.PublicDockerImageRepository
+		}
+		if len(repositorySpec) == 0 {
+			return "", false, false, false, ErrNoStreamRepository
+		}
+		pullSpec = JoinImageStreamTag(repositorySpec, tag)
+	}
+	return pullSpec, hasNewer, hasStatus, isTagEmpty, nil
+}
+
 // ResolveLatestTaggedImage returns the appropriate pull spec for a given tag in
 // the image stream, handling the tag's reference policy if necessary to return
 // a resolved image. Callers that transform an ImageStreamTag into a pull spec
-// should use this method instead of LatestTaggedImage.
+// should use this method instead of LatestTaggedImage. This method ignores pending
+// imports (meaning the requested image may be stale) and will return no pull spec,
+// even if one is available on the spec tag (when importing kind DockerImage), if
+// the import has not completed.
+//
+// Use ResolveNewestPullSpecForTag() or ResolveRecentPullSpecForTag() if you want more
+// control over which pull spec is returned and which scenarios should be handled.
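+//
+// A hedged usage sketch (stream is assumed to be an *imagev1.ImageStream):
+//
+//	pullSpec, ok := ResolveLatestTaggedImage(stream, "latest")
+//	if !ok {
+//		// nothing has been imported or pushed for this tag yet
+//	}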
func ResolveLatestTaggedImage(stream *imagev1.ImageStream, tag string) (string, bool) { if len(tag) == 0 { tag = imagev1.DefaultImageTag @@ -300,31 +449,21 @@ func ResolveLatestTaggedImage(stream *imagev1.ImageStream, tag string) (string, // ResolveTagReference applies the tag reference rules for a stream, tag, and tag event for // that tag. It returns true if the tag is func resolveTagReference(stream *imagev1.ImageStream, tag string, latest *imagev1.TagEvent) (string, bool) { + // no image has been imported, so we can't resolve to a tagged image (we need an image id) if latest == nil { return "", false } - return resolveReferenceForTagEvent(stream, tag, latest), true -} - -// SpecHasTag returns named tag from image stream's spec and boolean whether one was found. -func SpecHasTag(stream *imagev1.ImageStream, name string) (imagev1.TagReference, bool) { - for _, tag := range stream.Spec.Tags { - if tag.Name == name { - return tag, true - } - } - return imagev1.TagReference{}, false -} - -// ResolveReferenceForTagEvent applies the tag reference rules for a stream, tag, and tag event for -// that tag. -func resolveReferenceForTagEvent(stream *imagev1.ImageStream, tag string, latest *imagev1.TagEvent) string { // retrieve spec policy - if not found, we use the latest spec ref, ok := SpecHasTag(stream, tag) if !ok { - return latest.DockerImageReference + return latest.DockerImageReference, true } + return resolveReferenceForTagEvent(stream, ref, latest), true +} +// resolveReferenceForTagEvent applies the tag reference rules for a stream, tag, and tag event for +// that tag. +func resolveReferenceForTagEvent(stream *imagev1.ImageStream, ref imagev1.TagReference, latest *imagev1.TagEvent) string { switch ref.ReferencePolicy.Type { // the local reference policy attempts to use image pull through on the integrated // registry if possible diff --git a/vendor/github.com/openshift/library-go/pkg/image/registryclient/client.go b/vendor/github.com/openshift/library-go/pkg/image/registryclient/client.go index 1689a5cb49..54bba11ed2 100644 --- a/vendor/github.com/openshift/library-go/pkg/image/registryclient/client.go +++ b/vendor/github.com/openshift/library-go/pkg/image/registryclient/client.go @@ -1,6 +1,7 @@ package registryclient import ( + "context" "fmt" "hash" "io" @@ -12,7 +13,6 @@ import ( "sync" "time" - "golang.org/x/net/context" "golang.org/x/time/rate" "k8s.io/klog/v2" @@ -26,6 +26,8 @@ import ( "github.com/docker/distribution/registry/client/auth/challenge" "github.com/docker/distribution/registry/client/transport" "github.com/opencontainers/go-digest" + + imagereference "github.com/openshift/library-go/pkg/image/reference" ) // RepositoryRetriever fetches a Docker distribution.Repository. @@ -80,6 +82,7 @@ type Context struct { Credentials auth.CredentialStore RequestModifiers []transport.RequestModifier Limiter *rate.Limiter + Alternates AlternateBlobSourceStrategy DisableDigestVerification bool @@ -138,6 +141,11 @@ func (c *Context) WithCredentials(credentials auth.CredentialStore) *Context { return c } +func (c *Context) WithAlternateBlobSourceStrategy(alternateStrategy AlternateBlobSourceStrategy) *Context { + c.Alternates = alternateStrategy + return c +} + // Reset clears any cached repository info for this context. func (c *Context) Reset() { c.lock.Lock() @@ -200,18 +208,68 @@ func (c *Context) Ping(ctx context.Context, registry *url.URL, insecure bool) (h return t, &src, nil } +// RepositoryForRef returns a distribution.Repository against the provided image reference. 
If insecure +// is true, HTTP connections are allowed and HTTPS certificate verification errors will be ignored. The returned +// Repository instance is threadsafe but the ManifestService, TagService, or BlobService are not. +func (c *Context) RepositoryForRef(ctx context.Context, ref imagereference.DockerImageReference, insecure bool) (distribution.Repository, error) { + return c.connectToRegistry(ctx, repositoryLocator{ref: ref}, insecure) +} + +// Repository returns a distribution.Repository against the provided registry and repository name. If insecure +// is true, HTTP connections are allowed and HTTPS certificate verification errors will be ignored. The returned +// Repository instance is threadsafe but the ManifestService, TagService, or BlobService are not. Note - the caller +// is responsible for providing a valid registry url for docker.io - use RepositoryForRef() to avoid that. func (c *Context) Repository(ctx context.Context, registry *url.URL, repoName string, insecure bool) (distribution.Repository, error) { named, err := reference.WithName(repoName) if err != nil { return nil, err } + ref, err := imagereference.Parse(repoName) + if err != nil { + return nil, err + } + ref.Registry = registry.Host + locator := repositoryLocator{ + named: named, + ref: ref, + url: registry, + } + return &blobMirroredRepository{ + locator: locator, + insecure: insecure, + strategy: c.Alternates, + retriever: c, + }, nil +} + +// connectToRegistry is private and returns a non-wrapped, non-mirrorable repository. +func (c *Context) connectToRegistry(ctx context.Context, locator repositoryLocator, insecure bool) (RepositoryWithLocation, error) { + var named reference.Named = locator.named + var registryURL *url.URL = locator.url + var path string + + // ensure the values needed from the locator are defaulted + if named == nil { + path = locator.ref.RepositoryName() + var err error + named, err = reference.WithName(path) + if err != nil { + return nil, err + } + } else { + path = reference.Path(named) + } + if registryURL == nil { + registryURL = locator.ref.RegistryURL() + } - rt, src, err := c.Ping(ctx, registry, insecure) + // attempt to connect to the registry to get auth instructions + rt, src, err := c.Ping(ctx, registryURL, insecure) if err != nil { return nil, err } - rt = c.repositoryTransport(rt, src, repoName) + rt = c.repositoryTransport(rt, src, path) repo, err := registryclient.NewRepository(named, src.String(), rt) if err != nil { @@ -224,7 +282,7 @@ func (c *Context) Repository(ctx context.Context, registry *url.URL, repoName st if limiter == nil { limiter = rate.NewLimiter(rate.Limit(5), 5) } - return NewLimitedRetryRepository(repo, c.Retries, limiter), nil + return NewLimitedRetryRepository(locator.ref, repo, c.Retries, limiter), nil } func (c *Context) ping(registry url.URL, insecure bool, transport http.RoundTripper) (*url.URL, error) { @@ -359,6 +417,7 @@ var nowFn = time.Now type retryRepository struct { distribution.Repository + ref imagereference.DockerImageReference limiter *rate.Limiter retries int sleepFn func(time.Duration) @@ -366,16 +425,21 @@ type retryRepository struct { // NewLimitedRetryRepository wraps a distribution.Repository with helpers that will retry temporary failures // over a limited time window and duration, and also obeys a rate limit. 
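+// ref identifies the registry and repository so callers can recover it via Ref().
+// A hedged call sketch (upstream is assumed to be an existing distribution.Repository):
+//
+//	repo := NewLimitedRetryRepository(ref, upstream, 2, rate.NewLimiter(rate.Limit(5), 5))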
-func NewLimitedRetryRepository(repo distribution.Repository, retries int, limiter *rate.Limiter) distribution.Repository { +func NewLimitedRetryRepository(ref imagereference.DockerImageReference, repo distribution.Repository, retries int, limiter *rate.Limiter) RepositoryWithLocation { return &retryRepository{ Repository: repo, + ref: ref, limiter: limiter, retries: retries, sleepFn: time.Sleep, } } +func (r *retryRepository) Ref() imagereference.DockerImageReference { + return r.ref +} + // isTemporaryHTTPError returns true if the error indicates a temporary or partial HTTP failure func isTemporaryHTTPError(err error) (time.Duration, bool) { if err == nil { diff --git a/vendor/github.com/openshift/library-go/pkg/image/registryclient/client_mirrored.go b/vendor/github.com/openshift/library-go/pkg/image/registryclient/client_mirrored.go new file mode 100644 index 0000000000..21f4b86e3b --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/image/registryclient/client_mirrored.go @@ -0,0 +1,527 @@ +package registryclient + +import ( + "context" + "fmt" + "net/http" + "net/url" + "sync" + + "github.com/docker/distribution" + "github.com/opencontainers/go-digest" + "github.com/openshift/library-go/pkg/image/reference" + "k8s.io/klog/v2" + + distributionreference "github.com/docker/distribution/reference" +) + +// AlternateBlobSourceStrategy is consulted when a repository cannot be reached to find alternate +// repositories that may be able to serve a given content-addressed blob. The strategy is consulted +// at most twice - once before any request is made to a given repository. If FirstRequest() returns a +// list of alternates, OnFailure is not invoked. +type AlternateBlobSourceStrategy interface { + // FirstRequest returns the set of locations that should be searched in a preferred order. If locator + // is not included in the response it will not be searched. If alternateRepositories is an empty list + // no lookup will be performed and requests will exit with an error. If alternateRepositories is nil + // and err is nil, OnFailure will be invoked if the first request fails. + FirstRequest(ctx context.Context, locator reference.DockerImageReference) (alternateRepositories []reference.DockerImageReference, err error) + // OnFailure is invoked if FirstRequest returned no error and a nil list of locations if and only if + // an API call fails on the specified request. The result of alternateRepositories is cached for + // subsequent calls to that repository. + OnFailure(ctx context.Context, locator reference.DockerImageReference) (alternateRepositories []reference.DockerImageReference, err error) +} + +// ManifestWithLocationService extends the ManifestService to allow clients to retrieve a manifest and +// get the location of the mirrored manifest. Not all ManifestServices returned from a Repository will +// support this interface and it must be conditional. +type ManifestWithLocationService interface { + distribution.ManifestService + + // GetWithLocation returns the registry URL the provided manifest digest was retrieved from which may be Repository.Named(), + // or one of the blob mirrors if alternate location for blob sources was provided. It returns an error if the digest could not be + // located - if an error is returned the source reference (Repository.Named()) will be set. 
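+	//
+	// A hedged call sketch (ms is assumed to implement ManifestWithLocationService):
+	//
+	//	manifest, ref, err := ms.GetWithLocation(ctx, dgst)
+	//	// ref identifies the registry (source or mirror) that served the manifest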
+	GetWithLocation(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, reference.DockerImageReference, error)
+}
+
+// RepositoryWithLocation extends the Repository and allows clients to know which repository registry this talks to
+// as primary (as a complement to Named() which does not include the URL).
+type RepositoryWithLocation interface {
+	distribution.Repository
+
+	// Ref returns the DockerImageReference representing this repository.
+	Ref() reference.DockerImageReference
+}
+
+// blobMirroredRepoRetriever allows a caller to retrieve a distribution.Repository. It may perform
+// requests to authorize the client and will return an error if it fails.
+type blobMirroredRepoRetriever interface {
+	connectToRegistry(context.Context, repositoryLocator, bool) (RepositoryWithLocation, error)
+}
+
+// repositoryLocator caches the components necessary to connect to a single image repository.
+type repositoryLocator struct {
+	ref reference.DockerImageReference
+	// url may specify a default protocol (http) instead of (https), but is otherwise calculated
+	// by taking ref.Registry and applying it to url.Host
+	url *url.URL
+	// named is the image repository path on the server (namespace and name in ref terms) and is
+	// required for the distribution registryclient.
+	named distributionreference.Named
+}
+
+// blobMirroredRepository provides failover lookup behavior for blobs in a given repository on
+// errors by delegating to the provided strategy for the first request or when a failure occurs.
+// The strategy is expected to return a set of alternate locations to consume content from,
+// which may not include the original source. Only requests made for content addressable blobs
+// may be consulted in this fashion (anything via digest) - everything else must use source().
+type blobMirroredRepository struct {
+	locator  repositoryLocator
+	insecure bool
+
+	strategy  AlternateBlobSourceStrategy
+	retriever blobMirroredRepoRetriever
+
+	lock  sync.Mutex
+	order []reference.DockerImageReference
+	repos map[reference.DockerImageReference]RepositoryWithLocation
+}
+
+// Named returns the name of the repository.
+func (r *blobMirroredRepository) Named() distributionreference.Named {
+	return r.locator.named
+}
+
+// Ref returns the DockerImageReference of this repository.
+func (r *blobMirroredRepository) Ref() reference.DockerImageReference {
+	return r.locator.ref
+}
+
+// Manifests wraps the manifest service in a blobMirroredManifest for shared retries.
+func (r *blobMirroredRepository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) {
+	return &blobMirroredManifest{repo: r, options: options}, nil
+}
+
+// Blobs wraps the blob service in a blobMirroredBlobstore for shared retries.
+func (r *blobMirroredRepository) Blobs(ctx context.Context) distribution.BlobStore {
+	return blobMirroredBlobstore{repo: r}
+}
+
+// Tags lists the tags under the named repository.
+func (r *blobMirroredRepository) Tags(ctx context.Context) distribution.TagService {
+	return blobMirroredTags{repo: r}
+}
+
+var (
+	errNoValidAlternates = fmt.Errorf("no valid alternative sources for this content located")
+	errNoValidSource     = fmt.Errorf("no source repository defined for accessing the repository")
+)
+
+// initialRepos returns a list of locations to attempt to access, a boolean indicating that alternates
+// were suggested, or an error.
+func (r *blobMirroredRepository) initialRepos(ctx context.Context) ([]reference.DockerImageReference, bool, error) { + if r.strategy == nil { + return []reference.DockerImageReference{r.locator.ref}, false, nil + } + + // protect FirstRequest being called only one at a time and r.order writes + r.lock.Lock() + defer r.lock.Unlock() + if r.order != nil { + return r.order, true, nil + } + alternates, err := r.strategy.FirstRequest(ctx, r.locator.ref) + if err != nil { + return nil, false, err + } + if len(alternates) == 0 { + return []reference.DockerImageReference{r.locator.ref}, false, nil + } + r.order = alternates + return r.order, len(alternates) > 0, nil +} + +// errorRepos returns a list of alternate registries to search for the provided content. +func (r *blobMirroredRepository) errorRepos(ctx context.Context) ([]reference.DockerImageReference, error) { + if r.strategy == nil { + return nil, nil + } + + // TODO: potentially filter certain types of errors, maybe even per method type, if we ever + // retry non-idempotent operations + // protect OnFailure being called one at a time and r.order writes + r.lock.Lock() + defer r.lock.Unlock() + if r.order != nil { + return nil, nil + } + alternates, err := r.strategy.OnFailure(ctx, r.locator.ref) + if err != nil { + return nil, err + } + r.order = alternates + return r.order, nil +} + +// attemptRepos will invoke fn on all repos until fn returns no error. fn is expected to be idempotent. +func (r *blobMirroredRepository) attemptRepos(ctx context.Context, repos []reference.DockerImageReference, fn func(r RepositoryWithLocation) error) error { + var firstErr error + for _, ref := range repos { + klog.V(5).Infof("Attempting to connect to %s", ref) + repo, err := r.connect(ctx, ref) + if err != nil { + if firstErr == nil { + firstErr = err + } + continue + } + if err := fn(repo); err != nil { + if firstErr == nil { + firstErr = err + } + continue + } + return nil + } + return firstErr +} + +// attemptFirstConnectedRepo will invoke fn on the first repo that successfully connects. +func (r *blobMirroredRepository) attemptFirstConnectedRepo(ctx context.Context, repos []reference.DockerImageReference, fn func(r RepositoryWithLocation) error) error { + var firstErr error + for _, ref := range repos { + klog.V(5).Infof("Attempting to connect to %s", ref) + repo, err := r.connect(ctx, ref) + if err != nil { + if firstErr == nil { + firstErr = err + } + continue + } + return fn(repo) + } + return firstErr +} + +// alternates accesses the set of repositories that may be valid alternatives for accessing content +func (r *blobMirroredRepository) alternates(ctx context.Context, fn func(r RepositoryWithLocation) error) error { + repos, loaded, err := r.initialRepos(ctx) + if err != nil { + return err + } + if attemptErr := r.attemptRepos(ctx, repos, fn); attemptErr != nil { + if loaded { + return attemptErr + } + alternates, err := r.errorRepos(ctx) + if err != nil { + return err + } + if len(alternates) == 0 { + return attemptErr + } + if alternateErr := r.attemptRepos(ctx, alternates, fn); alternateErr != nil { + return attemptErr + } + } + return nil +} + +// firstConnectedAlternate invokes fn on the first alternate that can be connected to. Use when the +// function can only be invoked once (such as a method with side effects, like ServeBlob which writes +// to the response). 
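+// For example, ServeBlob below can only write one response, so it is routed through
+// this method rather than alternates (sketch; w, req, and dgst are assumed to come
+// from an HTTP handler):
+//
+//	err := r.firstConnectedAlternate(ctx, func(repo RepositoryWithLocation) error {
+//		return repo.Blobs(ctx).ServeBlob(ctx, w, req, dgst)
+//	})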
+func (r *blobMirroredRepository) firstConnectedAlternate(ctx context.Context, fn func(r RepositoryWithLocation) error) error {
+	repos, loaded, err := r.initialRepos(ctx)
+	if err != nil {
+		return err
+	}
+	if len(repos) == 0 {
+		return errNoValidAlternates
+	}
+	if attemptErr := r.attemptFirstConnectedRepo(ctx, repos, fn); attemptErr != nil {
+		if loaded {
+			return attemptErr
+		}
+		alternates, err := r.errorRepos(ctx)
+		if err != nil {
+			return err
+		}
+		if alternateErr := r.attemptFirstConnectedRepo(ctx, alternates, fn); alternateErr != nil {
+			return attemptErr
+		}
+	}
+	return nil
+}
+
+// source connects to the original repository or returns an error. It will always use the same value
+// of insecure as the original repository. Use when the request should only go to the initial repo.
+func (r *blobMirroredRepository) source(ctx context.Context, fn func(r distribution.Repository) error) error {
+	repo, err := r.connect(ctx, r.locator.ref)
+	if err != nil {
+		return err
+	}
+	return fn(repo)
+}
+
+// connect reuses or creates a connection to the provided reference, returning a repository instance
+// or an error. This method expects that the connection only talks to the provided registry.
+func (r *blobMirroredRepository) connect(ctx context.Context, ref reference.DockerImageReference) (RepositoryWithLocation, error) {
+	r.lock.Lock()
+	defer r.lock.Unlock()
+
+	repo, ok := r.repos[ref]
+	if ok {
+		return repo, nil
+	}
+	locator := repositoryLocator{
+		ref: ref,
+	}
+	repo, err := r.retriever.connectToRegistry(ctx, locator, ref != r.locator.ref || r.insecure)
+	if err != nil {
+		return nil, err
+	}
+	if r.repos == nil {
+		r.repos = make(map[reference.DockerImageReference]RepositoryWithLocation)
+	}
+	r.repos[ref] = repo
+	return repo, nil
+}
+
+// blobMirroredManifest will sequentially retry manifest operations on a set of repositories determined
+// by the repository list, caching manifest services locally as needed (manifest service is assumed
+// to have local state and does so in the registry client). The individual manifest service is not
+// thread safe, but methods on this interface are thread safe.
+type blobMirroredManifest struct {
+	repo    *blobMirroredRepository
+	options []distribution.ManifestServiceOption
+
+	lock  sync.Mutex
+	cache map[distribution.Repository]distribution.ManifestService
+}
+
+var _ distribution.ManifestService = &blobMirroredManifest{}
+var _ ManifestWithLocationService = &blobMirroredManifest{}
+
+// init retrieves or caches a manifest service for the provided repository, since each manifest
+// service has local state.
+func (f *blobMirroredManifest) init(ctx context.Context, r distribution.Repository) (distribution.ManifestService, error) {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+
+	ms := f.cache[r]
+	if ms != nil {
+		return ms, nil
+	}
+	ms, err := r.Manifests(ctx, f.options...)
+	if err != nil {
+		return nil, err
+	}
+	if f.cache == nil {
+		f.cache = make(map[distribution.Repository]distribution.ManifestService)
+	}
+	f.cache[r] = ms
+	return ms, nil
+}
+
+// alternates invokes fn once per alternate repo until fn returns without error.
+func (f *blobMirroredManifest) alternates(ctx context.Context, fn func(m distribution.ManifestService, repo RepositoryWithLocation) error) error {
+	return f.repo.alternates(ctx, func(repo RepositoryWithLocation) error {
+		ms, err := f.init(ctx, repo)
+		if err != nil {
+			return err
+		}
+		return fn(ms, repo)
+	})
+}
+
+// source invokes fn against the primary location.
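+// Mutating calls are routed through this method; for instance, Put below reduces to
+// the following (a sketch mirroring the implementation that follows):
+//
+//	var dgst digest.Digest
+//	err := f.source(ctx, func(ms distribution.ManifestService) error {
+//		var err error
+//		dgst, err = ms.Put(ctx, manifest)
+//		return err
+//	})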
+func (f *blobMirroredManifest) source(ctx context.Context, fn func(r distribution.ManifestService) error) error { + return f.repo.source(ctx, func(r distribution.Repository) error { + ms, err := f.init(ctx, r) + if err != nil { + return err + } + return fn(ms) + }) +} + +func (f *blobMirroredManifest) Put(ctx context.Context, manifest distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { + var dgst digest.Digest + err := f.source(ctx, func(r distribution.ManifestService) error { + var err error + dgst, err = r.Put(ctx, manifest, options...) + return err + }) + return dgst, err +} + +func (f *blobMirroredManifest) Delete(ctx context.Context, dgst digest.Digest) error { + return f.source(ctx, func(r distribution.ManifestService) error { + return r.Delete(ctx, dgst) + }) +} + +func (f *blobMirroredManifest) Exists(ctx context.Context, dgst digest.Digest) (bool, error) { + var ok bool + err := f.alternates(ctx, func(m distribution.ManifestService, repo RepositoryWithLocation) error { + var err error + ok, err = m.Exists(ctx, dgst) + return err + }) + return ok, err +} + +func (f *blobMirroredManifest) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { + var manifest distribution.Manifest + err := f.alternates(ctx, func(m distribution.ManifestService, repo RepositoryWithLocation) error { + var err error + manifest, err = m.Get(ctx, dgst, options...) + klog.V(5).Infof("get manifest for %s served from %#v: %v", dgst, m, err) + return err + }) + return manifest, err +} + +func (f *blobMirroredManifest) GetWithLocation(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, reference.DockerImageReference, error) { + var manifest distribution.Manifest + var ref = f.repo.locator.ref + err := f.alternates(ctx, func(m distribution.ManifestService, repo RepositoryWithLocation) error { + var err error + manifest, err = m.Get(ctx, dgst, options...) + klog.V(5).Infof("get manifest for %s served from %#v: %v", dgst, m, err) + if err == nil { + ref = repo.Ref() + } + return err + }) + return manifest, ref, err +} + +// blobMirroredBlobstore wraps the blob store and invokes retries on the repo. 
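+// From the caller's point of view the mirroring is transparent (sketch; repo is an
+// assumed *blobMirroredRepository and dgst a known content digest):
+//
+//	data, err := repo.Blobs(ctx).Get(ctx, dgst) // tries each alternate in order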
+type blobMirroredBlobstore struct { + repo *blobMirroredRepository +} + +var _ distribution.BlobService = blobMirroredBlobstore{} + +func (f blobMirroredBlobstore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { + var data []byte + err := f.repo.alternates(ctx, func(r RepositoryWithLocation) error { + var err error + data, err = r.Blobs(ctx).Get(ctx, dgst) + klog.V(5).Infof("get for %s served from %s: %v", dgst, r.Named(), err) + return err + }) + return data, err +} + +func (f blobMirroredBlobstore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + var desc distribution.Descriptor + err := f.repo.alternates(ctx, func(r RepositoryWithLocation) error { + var err error + desc, err = r.Blobs(ctx).Stat(ctx, dgst) + return err + }) + return desc, err +} + +func (f blobMirroredBlobstore) ServeBlob(ctx context.Context, w http.ResponseWriter, req *http.Request, dgst digest.Digest) error { + err := f.repo.firstConnectedAlternate(ctx, func(r RepositoryWithLocation) error { + return r.Blobs(ctx).ServeBlob(ctx, w, req, dgst) + }) + return err +} + +func (f blobMirroredBlobstore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { + var rsc distribution.ReadSeekCloser + err := f.repo.alternates(ctx, func(r RepositoryWithLocation) error { + var err error + rsc, err = r.Blobs(ctx).Open(ctx, dgst) + return err + }) + return rsc, err +} + +func (f blobMirroredBlobstore) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { + var bw distribution.BlobWriter + err := f.repo.source(ctx, func(r distribution.Repository) error { + var err error + bw, err = r.Blobs(ctx).Create(ctx, options...) + return err + }) + return bw, err +} + +func (f blobMirroredBlobstore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { + var desc distribution.Descriptor + err := f.repo.source(ctx, func(r distribution.Repository) error { + var err error + desc, err = r.Blobs(ctx).Put(ctx, mediaType, p) + return err + }) + return desc, err +} + +func (f blobMirroredBlobstore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { + var bw distribution.BlobWriter + err := f.repo.source(ctx, func(r distribution.Repository) error { + var err error + bw, err = r.Blobs(ctx).Resume(ctx, id) + return err + }) + return bw, err +} + +func (f blobMirroredBlobstore) Delete(ctx context.Context, dgst digest.Digest) error { + return f.repo.source(ctx, func(r distribution.Repository) error { + return r.Blobs(ctx).Delete(ctx, dgst) + }) +} + +// blobMirroredTags lazily accesses the source repository +type blobMirroredTags struct { + repo *blobMirroredRepository +} + +var _ distribution.TagService = blobMirroredTags{} + +func (f blobMirroredTags) Get(ctx context.Context, tag string) (distribution.Descriptor, error) { + var desc distribution.Descriptor + err := f.repo.source(ctx, func(r distribution.Repository) error { + var err error + desc, err = r.Tags(ctx).Get(ctx, tag) + return err + }) + return desc, err +} + +func (f blobMirroredTags) All(ctx context.Context) ([]string, error) { + var tags []string + err := f.repo.source(ctx, func(r distribution.Repository) error { + var err error + tags, err = r.Tags(ctx).All(ctx) + return err + }) + return tags, err +} + +func (f blobMirroredTags) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) { + var tags []string + err := f.repo.source(ctx, func(r distribution.Repository) error { + var err error 
+		tags, err = r.Tags(ctx).Lookup(ctx, digest)
+		return err
+	})
+	return tags, err
+}
+
+func (f blobMirroredTags) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error {
+	return f.repo.source(ctx, func(r distribution.Repository) error {
+		return r.Tags(ctx).Tag(ctx, tag, desc)
+	})
+}
+
+func (f blobMirroredTags) Untag(ctx context.Context, tag string) error {
+	return f.repo.source(ctx, func(r distribution.Repository) error {
+		return r.Tags(ctx).Untag(ctx, tag)
+	})
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/csr/README.md b/vendor/github.com/openshift/library-go/pkg/operator/csr/README.md
new file mode 100644
index 0000000000..9802105c73
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/csr/README.md
@@ -0,0 +1,16 @@
+You usually want to start with NewSimpleClientCertificateController.
+
+This package provides a control loop which takes as input
+1. target secret name
+2. cert common name
+3. desired validity (recall that the signing cert can sign for less)
+
+The flow goes like this.
+1. if the secret contains a valid client cert good for at least five days or 50% of its validity, do nothing. If not...
+2. create a new cert/key pair in memory
+3. create a CSR in the API
+4. watch the CSR in the API until it is approved or denied
+5. if denied, write a degraded status and return
+6. if approved, update the secret
+
+The secrets have annotations which match our other cert rotation secrets.
\ No newline at end of file
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/csr/cert_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/csr/cert_controller.go
new file mode 100644
index 0000000000..f2bb06ec70
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/csr/cert_controller.go
@@ -0,0 +1,322 @@
+package csr
+
+import (
+	"context"
+	"crypto/tls"
+	"crypto/x509/pkix"
+	"fmt"
+	"math/rand"
+	"time"
+
+	"github.com/openshift/library-go/pkg/controller/factory"
+	"github.com/openshift/library-go/pkg/operator/events"
+
+	certificates "k8s.io/api/certificates/v1"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	certificatesinformers "k8s.io/client-go/informers/certificates/v1"
+	corev1informers "k8s.io/client-go/informers/core/v1"
+	csrclient "k8s.io/client-go/kubernetes/typed/certificates/v1"
+	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
+	certificateslisters "k8s.io/client-go/listers/certificates/v1"
+	cache "k8s.io/client-go/tools/cache"
+	certutil "k8s.io/client-go/util/cert"
+	"k8s.io/client-go/util/keyutil"
+	"k8s.io/klog/v2"
+)
+
+const (
+	// TLSKeyFile is the name of the tls key file in kubeconfigSecret
+	TLSKeyFile = "tls.key"
+	// TLSCertFile is the name of the tls cert file in kubeconfigSecret
+	TLSCertFile = "tls.crt"
+)
+
+// ControllerResyncInterval is exposed so that integration tests can crank up the controller sync speed.
+var ControllerResyncInterval = 5 * time.Minute
+
+// CSROption includes options that are used to create and monitor csrs
+type CSROption struct {
+	// ObjectMeta is the ObjectMeta shared by all created csrs. It should use GenerateName instead of Name
+	// to generate random csr names
+	ObjectMeta metav1.ObjectMeta
+	// Subject represents the subject of the client certificate used to create csrs
+	Subject *pkix.Name
+	// DNSNames represents DNS names used to create the client certificate
+	DNSNames []string
+	// SignerName is the name of the signer specified in the created csrs
+	SignerName string
+
+	// EventFilterFunc matches csrs created with the above options
+	EventFilterFunc factory.EventFilterFunc
+}
+
+// ClientCertOption includes options that are used to create the client certificate
+type ClientCertOption struct {
+	// SecretNamespace is the namespace of the secret containing the client certificate.
+	SecretNamespace string
+	// SecretName is the name of the secret containing the client certificate. The secret will be created if
+	// it does not exist.
+	SecretName string
+	// AdditonalSecretData contains data that will be added into the client certificate secret besides tls.key/tls.crt
+	AdditonalSecretData map[string][]byte
+}
+
+// clientCertificateController implements the common logic of hub client certificate creation/rotation. It
+// creates a client certificate and rotates it before it expires, by using csrs. The client
+// certificate generated is stored in a specific secret with the keys below:
+// 1). tls.key: tls key file
+// 2). tls.crt: tls cert file
+type clientCertificateController struct {
+	ClientCertOption
+	CSROption
+
+	hubCSRLister    certificateslisters.CertificateSigningRequestLister
+	hubCSRClient    csrclient.CertificateSigningRequestInterface
+	spokeCoreClient corev1client.CoreV1Interface
+	controllerName  string
+
+	// csrName is the name of the csr created by the controller that is waiting for approval.
+	csrName string
+
+	// keyData is the private key data used to create a csr.
+	// csrName and keyData store the internal state of the controller. They are set after the controller creates a new csr
+	// and cleared once the csr is approved and processed by the controller. There are 4 combinations of their values:
+	// 1. csrName empty, keyData empty: means we aren't trying to create a new client cert, our current one is valid
+	// 2. csrName set, keyData empty: there was a bug
+	// 3. csrName set, keyData set: we are waiting for a new cert to be signed.
+	// 4. csrName empty, keyData set: the CSR failed to create, this shouldn't happen, it's a bug.
+	keyData []byte
+}
+
+// NewClientCertificateController returns an instance of clientCertificateController
+func NewClientCertificateController(
+	clientCertOption ClientCertOption,
+	csrOption CSROption,
+	hubCSRInformer certificatesinformers.CertificateSigningRequestInformer,
+	hubCSRClient csrclient.CertificateSigningRequestInterface,
+	spokeSecretInformer corev1informers.SecretInformer,
+	spokeCoreClient corev1client.CoreV1Interface,
+	recorder events.Recorder,
+	controllerName string,
+) factory.Controller {
+	c := clientCertificateController{
+		ClientCertOption: clientCertOption,
+		CSROption:        csrOption,
+		hubCSRLister:     hubCSRInformer.Lister(),
+		hubCSRClient:     hubCSRClient,
+		spokeCoreClient:  spokeCoreClient,
+		controllerName:   controllerName,
+	}
+
+	return factory.New().
+ WithFilteredEventsInformersQueueKeyFunc(func(obj runtime.Object) string { + key, _ := cache.MetaNamespaceKeyFunc(obj) + return key + }, func(obj interface{}) bool { + accessor, err := meta.Accessor(obj) + if err != nil { + return false + } + // only enqueue a specific secret + if accessor.GetNamespace() == c.SecretNamespace && accessor.GetName() == c.SecretName { + return true + } + return false + }, spokeSecretInformer.Informer()). + WithFilteredEventsInformersQueueKeyFunc(func(obj runtime.Object) string { + accessor, _ := meta.Accessor(obj) + return accessor.GetName() + }, c.EventFilterFunc, hubCSRInformer.Informer()). + WithSync(c.sync). + ResyncEvery(ControllerResyncInterval). + ToController(controllerName, recorder) +} + +func (c *clientCertificateController) sync(ctx context.Context, syncCtx factory.SyncContext) error { + // get secret containing client certificate + secret, err := c.spokeCoreClient.Secrets(c.SecretNamespace).Get(ctx, c.SecretName, metav1.GetOptions{}) + switch { + case errors.IsNotFound(err): + secret = &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: c.SecretNamespace, + Name: c.SecretName, + }, + } + case err != nil: + return fmt.Errorf("unable to get secret %q: %w", c.SecretNamespace+"/"+c.SecretName, err) + } + + // reconcile pending csr if exists + if len(c.csrName) > 0 { + newSecretConfig, err := c.syncCSR(secret) + if err != nil { + c.reset() + return err + } + if len(newSecretConfig) == 0 { + return nil + } + // append additional data into client certificate secret + for k, v := range c.AdditonalSecretData { + newSecretConfig[k] = v + } + secret.Data = newSecretConfig + // save the changes into secret + if err := c.saveSecret(secret); err != nil { + return err + } + syncCtx.Recorder().Eventf("ClientCertificateCreated", "A new client certificate for %s is available", c.controllerName) + c.reset() + return nil + } + + // create a csr to request new client certificate if + // a. there is no valid client certificate issued for the current cluster/agent + // b. client certificate exists and has less than a random percentage range from 20% to 25% of its life remaining + if c.hasValidClientCertificate(secret) { + notBefore, notAfter, err := getCertValidityPeriod(secret) + if err != nil { + return err + } + + total := notAfter.Sub(*notBefore) + remaining := notAfter.Sub(time.Now()) + klog.V(4).Infof("Client certificate for %s: time total=%v, remaining=%v, remaining/total=%v", c.controllerName, total, remaining, remaining.Seconds()/total.Seconds()) + threshold := jitter(0.2, 0.25) + if remaining.Seconds()/total.Seconds() > threshold { + // Do nothing if the client certificate is valid and has more than a random percentage range from 20% to 25% of its life remaining + klog.V(4).Infof("Client certificate for %s is valid and has more than %.2f%% of its life remaining", c.controllerName, threshold*100) + return nil + } + syncCtx.Recorder().Eventf("CertificateRotationStarted", "The current client certificate for %s expires in %v. Start certificate rotation", c.controllerName, remaining.Round(time.Second)) + } else { + syncCtx.Recorder().Eventf("NoValidCertificateFound", "No valid client certificate for %s is found. 
Bootstrap is required", c.controllerName) + } + + // create a new private key + c.keyData, err = keyutil.MakeEllipticPrivateKeyPEM() + if err != nil { + return err + } + + // create a csr + c.csrName, err = c.createCSR(ctx) + if err != nil { + c.reset() + return err + } + syncCtx.Recorder().Eventf("CSRCreated", "A csr %q is created for %s", c.csrName, c.controllerName) + return nil +} + +func (c *clientCertificateController) syncCSR(secret *corev1.Secret) (map[string][]byte, error) { + // skip if there is no ongoing csr + if len(c.csrName) == 0 { + return nil, fmt.Errorf("no ongoing csr") + } + + // skip if csr no longer exists + csr, err := c.hubCSRLister.Get(c.csrName) + switch { + case errors.IsNotFound(err): + // fallback to fetching csr from hub apiserver in case it is not cached by informer yet + csr, err = c.hubCSRClient.Get(context.Background(), c.csrName, metav1.GetOptions{}) + if errors.IsNotFound(err) { + return nil, fmt.Errorf("unable to get csr %q. It might have already been deleted.", c.csrName) + } + case err != nil: + return nil, err + } + + // skip if csr is not approved yet + if !isCSRApproved(csr) { + return nil, nil + } + + // skip if csr has no certificate in its status yet + if len(csr.Status.Certificate) == 0 { + return nil, nil + } + + klog.V(4).Infof("Sync csr %v", c.csrName) + // check if cert in csr status matches with the corresponding private key + if c.keyData == nil { + return nil, fmt.Errorf("No private key found for certificate in csr: %s", c.csrName) + } + _, err = tls.X509KeyPair(csr.Status.Certificate, c.keyData) + if err != nil { + return nil, fmt.Errorf("Private key does not match with the certificate in csr: %s", c.csrName) + } + + data := map[string][]byte{ + TLSCertFile: csr.Status.Certificate, + TLSKeyFile: c.keyData, + } + + return data, nil +} + +func (c *clientCertificateController) createCSR(ctx context.Context) (string, error) { + privateKey, err := keyutil.ParsePrivateKeyPEM(c.keyData) + if err != nil { + return "", fmt.Errorf("invalid private key for certificate request: %w", err) + } + csrData, err := certutil.MakeCSR(privateKey, c.Subject, c.DNSNames, nil) + if err != nil { + return "", fmt.Errorf("unable to generate certificate request: %w", err) + } + + csr := &certificates.CertificateSigningRequest{ + ObjectMeta: c.ObjectMeta, + Spec: certificates.CertificateSigningRequestSpec{ + Request: csrData, + Usages: []certificates.KeyUsage{ + certificates.UsageDigitalSignature, + certificates.UsageKeyEncipherment, + certificates.UsageClientAuth, + }, + SignerName: c.SignerName, + }, + } + + req, err := c.hubCSRClient.Create(ctx, csr, metav1.CreateOptions{}) + if err != nil { + return "", err + } + return req.Name, nil +} + +func (c *clientCertificateController) saveSecret(secret *corev1.Secret) error { + var err error + if secret.ResourceVersion == "" { + _, err = c.spokeCoreClient.Secrets(c.SecretNamespace).Create(context.Background(), secret, metav1.CreateOptions{}) + return err + } + _, err = c.spokeCoreClient.Secrets(c.SecretNamespace).Update(context.Background(), secret, metav1.UpdateOptions{}) + return err +} + +func (c *clientCertificateController) reset() { + c.csrName = "" + c.keyData = nil +} + +func (c *clientCertificateController) hasValidClientCertificate(secret *corev1.Secret) bool { + if valid, err := IsCertificateValid(secret.Data[TLSCertFile], c.Subject); err == nil { + return valid + } + return false +} + +func jitter(percentage float64, maxFactor float64) float64 { + if maxFactor <= 0.0 { + maxFactor = 1.0 + } + newPercentage 
:= percentage + percentage*rand.Float64()*maxFactor + return newPercentage +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/csr/certificate.go b/vendor/github.com/openshift/library-go/pkg/operator/csr/certificate.go new file mode 100644 index 0000000000..1aeb661454 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/csr/certificate.go @@ -0,0 +1,135 @@ +package csr + +import ( + "crypto/x509/pkix" + "errors" + "fmt" + "time" + + certificatesv1 "k8s.io/api/certificates/v1" + corev1 "k8s.io/api/core/v1" + restclient "k8s.io/client-go/rest" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" + certutil "k8s.io/client-go/util/cert" + "k8s.io/klog/v2" +) + +// IsCertificateValid return true if +// 1) All certs in client certificate are not expired. +// 2) At least one cert matches the given subject if specified +func IsCertificateValid(certData []byte, subject *pkix.Name) (bool, error) { + certs, err := certutil.ParseCertsPEM(certData) + if err != nil { + return false, errors.New("unable to parse certificate") + } + + if len(certs) == 0 { + return false, errors.New("No cert found in certificate") + } + + now := time.Now() + // make sure no cert in the certificate chain expired + for _, cert := range certs { + if now.After(cert.NotAfter) { + klog.V(4).Infof("Part of the certificate is expired: %v", cert.NotAfter) + return false, nil + } + } + + if subject == nil { + return true, nil + } + + // check subject of certificates + for _, cert := range certs { + if cert.Subject.CommonName != subject.CommonName { + continue + } + return true, nil + } + + klog.V(4).Infof("Certificate is not issued for subject (cn=%s)", subject.CommonName) + return false, nil +} + +// getCertValidityPeriod returns the validity period of the client certificate in the secret +func getCertValidityPeriod(secret *corev1.Secret) (*time.Time, *time.Time, error) { + if secret.Data == nil { + return nil, nil, fmt.Errorf("no client certificate found in secret %q", secret.Namespace+"/"+secret.Name) + } + + certData, ok := secret.Data[TLSCertFile] + if !ok { + return nil, nil, fmt.Errorf("no client certificate found in secret %q", secret.Namespace+"/"+secret.Name) + } + + certs, err := certutil.ParseCertsPEM(certData) + if err != nil { + return nil, nil, fmt.Errorf("unable to parse TLS certificates: %w", err) + } + + if len(certs) == 0 { + return nil, nil, errors.New("No cert found in certificate") + } + + // find out the validity period for all certs in the certificate chain + var notBefore, notAfter *time.Time + for index, cert := range certs { + if index == 0 { + notBefore = &cert.NotBefore + notAfter = &cert.NotAfter + continue + } + + if notBefore.Before(cert.NotBefore) { + notBefore = &cert.NotBefore + } + + if notAfter.After(cert.NotAfter) { + notAfter = &cert.NotAfter + } + } + + return notBefore, notAfter, nil +} + +// BuildKubeconfig builds a kubeconfig based on a rest config template with a cert/key pair +func BuildKubeconfig(clientConfig *restclient.Config, certPath, keyPath string) clientcmdapi.Config { + // Build kubeconfig. + kubeconfig := clientcmdapi.Config{ + // Define a cluster stanza based on the bootstrap kubeconfig. + Clusters: map[string]*clientcmdapi.Cluster{"default-cluster": { + Server: clientConfig.Host, + InsecureSkipTLSVerify: false, + CertificateAuthorityData: clientConfig.CAData, + }}, + // Define auth based on the obtained client cert. 
+ AuthInfos: map[string]*clientcmdapi.AuthInfo{"default-auth": { + ClientCertificate: certPath, + ClientKey: keyPath, + }}, + // Define a context that connects the auth info and cluster, and set it as the default + Contexts: map[string]*clientcmdapi.Context{"default-context": { + Cluster: "default-cluster", + AuthInfo: "default-auth", + Namespace: "configuration", + }}, + CurrentContext: "default-context", + } + + return kubeconfig +} + +// isCSRApproved returns true if the given csr has been approved +func isCSRApproved(csr *certificatesv1.CertificateSigningRequest) bool { + approved := false + for _, condition := range csr.Status.Conditions { + if condition.Type == certificatesv1.CertificateDenied { + return false + } else if condition.Type == certificatesv1.CertificateApproved { + approved = true + } + } + + return approved +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/csr/csr_approver.go b/vendor/github.com/openshift/library-go/pkg/operator/csr/csr_approver.go new file mode 100644 index 0000000000..036bf54cbe --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/csr/csr_approver.go @@ -0,0 +1,281 @@ +package csr + +import ( + "context" + "crypto/x509" + "encoding/pem" + "fmt" + + certapiv1 "k8s.io/api/certificates/v1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/authentication/serviceaccount" + certv1informers "k8s.io/client-go/informers/certificates/v1" + certv1client "k8s.io/client-go/kubernetes/typed/certificates/v1" + certv1listers "k8s.io/client-go/listers/certificates/v1" + "k8s.io/klog/v2" + + "github.com/openshift/library-go/pkg/controller/factory" + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/v1helpers" +) + +type CSRApprovalDecision string + +const ( + CSRApproved CSRApprovalDecision = "Approved" + CSRDenied CSRApprovalDecision = "Denied" + CSRNoOpinion CSRApprovalDecision = "NoOpinion" +) + +type CSRApprover interface { + Approve(csrObj *certapiv1.CertificateSigningRequest, x509CSR *x509.CertificateRequest) (approvalStatus CSRApprovalDecision, denyReason string, err error) +} + +type csrApproverController struct { + csrClient certv1client.CertificateSigningRequestInterface + csrLister certv1listers.CertificateSigningRequestLister + + csrApprover CSRApprover +} + +// NewCSRApproverController returns a controller that is observing the CSR API +// for a CSR of a given name. If such a CSR exists, it runs the `csrApprover.Approve()` +// against it and either denies, approves or leaves the CSR. +// +// If operatorClient is nil, the controller will log the errors instead of reporting +// them in an operator status. +func NewCSRApproverController( + controllerName string, + operatorClient v1helpers.OperatorClient, + csrClient certv1client.CertificateSigningRequestInterface, + csrInformers certv1informers.CertificateSigningRequestInformer, + csrFilter CSRFilter, + csrApprover CSRApprover, + eventsRecorder events.Recorder, +) factory.Controller { + c := &csrApproverController{ + csrClient: csrClient, + csrLister: csrInformers.Lister(), + csrApprover: csrApprover, + } + + csrFilterConverted := func(csr interface{}) bool { + csrObj, ok := csr.(*certapiv1.CertificateSigningRequest) + if !ok { + return false + } + return csrFilter.Matches(csrObj) + } + + f := factory.New(). + WithSync(c.sync). 
+ WithFilteredEventsInformersQueueKeyFunc(factory.ObjectNameToKey, csrFilterConverted, csrInformers.Informer()) + + if operatorClient != nil { + f.WithSyncDegradedOnError(operatorClient) + } + + return f.ToController( + "WebhookAuthenticatorCertApprover_"+controllerName, + eventsRecorder.WithComponentSuffix("webhook-authenticator-cert-approver-"+controllerName), + ) +} + +func (c *csrApproverController) sync(ctx context.Context, syncCtx factory.SyncContext) error { + csr, err := c.csrLister.Get(syncCtx.QueueKey()) + if err != nil { + if apierrors.IsNotFound(err) { + return nil + } + return err + } + + if approved, denied := getCertApprovalCondition(&csr.Status); approved || denied { + return nil + } + + csrCopy := csr.DeepCopy() + + csrPEM, _ := pem.Decode(csr.Spec.Request) + if csrPEM == nil { + return fmt.Errorf("failed to PEM-parse the CSR block in .spec.request: no CSRs were found") + } + + x509CSR, err := x509.ParseCertificateRequest(csrPEM.Bytes) + if err != nil { + return fmt.Errorf("failed to parse the CSR bytes: %v", err) + } + + if x509CSR.Subject.CommonName == csr.Spec.Username { + c.denyCSR(ctx, csrCopy, "IllegitimateRequester", "requester cannot request certificates for themselves", syncCtx.Recorder()) + } + + csrDecision, denyReason, err := c.csrApprover.Approve(csr, x509CSR) + if err != nil { + return c.denyCSR(ctx, csrCopy, "CSRApprovingFailed", fmt.Sprintf("there was an error during CSR approval: %v", err), syncCtx.Recorder()) + } + + switch csrDecision { + case CSRDenied: + return c.denyCSR(ctx, csrCopy, "CSRDenied", denyReason, syncCtx.Recorder()) + case CSRApproved: + return c.approveCSR(ctx, csrCopy, syncCtx.Recorder()) + case CSRNoOpinion: + fallthrough + default: + return nil + } +} + +func (c *csrApproverController) denyCSR(ctx context.Context, csrCopy *certapiv1.CertificateSigningRequest, reason, message string, eventsRecorder events.Recorder) error { + csrCopy.Status.Conditions = append(csrCopy.Status.Conditions, + certapiv1.CertificateSigningRequestCondition{ + Type: certapiv1.CertificateDenied, + Status: corev1.ConditionTrue, + Reason: reason, + Message: message, + }, + ) + + eventsRecorder.Eventf("CSRDenial", "The CSR %q has been denied: %s - %s", csrCopy.Name, reason, message) + _, err := c.csrClient.UpdateApproval(ctx, csrCopy.Name, csrCopy, v1.UpdateOptions{}) + return err +} + +func (c *csrApproverController) approveCSR(ctx context.Context, csrCopy *certapiv1.CertificateSigningRequest, eventsRecorder events.Recorder) error { + csrCopy.Status.Conditions = append(csrCopy.Status.Conditions, + certapiv1.CertificateSigningRequestCondition{ + Type: certapiv1.CertificateApproved, + Status: corev1.ConditionTrue, + Reason: "AutoApproved", + Message: fmt.Sprintf("Auto-approved CSR %q", csrCopy.Name), + }) + + eventsRecorder.Eventf("CSRApproval", "The CSR %q has been approved", csrCopy.Name) + _, err := c.csrClient.UpdateApproval(ctx, csrCopy.Name, csrCopy, v1.UpdateOptions{}) + return err +} + +func getCertApprovalCondition(status *certapiv1.CertificateSigningRequestStatus) (approved bool, denied bool) { + for _, c := range status.Conditions { + if c.Type == certapiv1.CertificateApproved { + approved = true + } + if c.Type == certapiv1.CertificateDenied { + denied = true + } + } + return +} + +type ServiceAccountApprover struct { + saGroups sets.String // saGroups is the set of groups for the SA expected to have created the CSR + saName string + expectedSubject string +} + +// ServiceAccountApprover approves CSRs with a given subject issued by the provided service 
account +func NewServiceAccountApprover(saNamespace, saName, expectedSubject string, additionalGroups ...string) *ServiceAccountApprover { + saGroups := append(serviceaccount.MakeGroupNames(saNamespace), "system:authenticated") + + return &ServiceAccountApprover{ + saName: serviceaccount.MakeUsername(saNamespace, saName), + saGroups: sets.NewString(append(saGroups, additionalGroups...)...), + expectedSubject: expectedSubject, + } +} + +func (a *ServiceAccountApprover) Approve(csrObj *certapiv1.CertificateSigningRequest, x509CSR *x509.CertificateRequest) (approvalStatus CSRApprovalDecision, denyReason string, err error) { + if csrObj == nil || x509CSR == nil { + return CSRDenied, "Error", fmt.Errorf("received a 'nil' CSR") + } + + if csrObj.Spec.Username != a.saName { + return CSRDenied, fmt.Sprintf("CSR %q was created by an unexpected user: %q", csrObj.Name, csrObj.Spec.Username), nil + } + + if csrGroups := sets.NewString(csrObj.Spec.Groups...); !csrGroups.Equal(a.saGroups) { + return CSRDenied, fmt.Sprintf("CSR %q was created by a user with unexpected groups: %v", csrObj.Name, csrGroups.List()), nil + } + + if expectedSubject := a.expectedSubject; x509CSR.Subject.String() != expectedSubject { + return CSRDenied, fmt.Sprintf("expected the CSR's subject to be %q, but it is %q", expectedSubject, x509CSR.Subject.String()), nil + } + + return CSRApproved, "", nil + +} + +type CSRFilter interface { + Matches(csr *certapiv1.CertificateSigningRequest) bool +} + +type AndFilter struct { + a, b CSRFilter +} + +func NewAndFilter(a, b CSRFilter) *AndFilter { + return &AndFilter{a, b} +} + +func (f *AndFilter) Matches(csr *certapiv1.CertificateSigningRequest) bool { + return f.a.Matches(csr) && f.b.Matches(csr) +} + +type OrFilter struct { + a, b CSRFilter +} + +func NewOrFilter(a, b CSRFilter) *OrFilter { + return &OrFilter{a, b} +} + +func (f *OrFilter) Matches(csr *certapiv1.CertificateSigningRequest) bool { + return f.a.Matches(csr) || f.b.Matches(csr) +} + +type LabelFilter struct { + labelSelector labels.Selector +} + +func NewLabelFilter(selector labels.Selector) *LabelFilter { + return &LabelFilter{selector} +} + +func (f *LabelFilter) Matches(csr *certapiv1.CertificateSigningRequest) bool { + return f.labelSelector.Matches(labels.Set(csr.Labels)) +} + +type NamesFilter struct { + names sets.String +} + +func NewNamesFilter(names ...string) *NamesFilter { + return &NamesFilter{sets.NewString(names...)} +} + +func (f *NamesFilter) Matches(csr *certapiv1.CertificateSigningRequest) bool { + return f.names.Has(csr.Name) +} + +type RequestCommonNameFilter struct { + commonNames sets.String +} + +func NewRequestCommonNameFilter(commonNames ...string) *RequestCommonNameFilter { + return &RequestCommonNameFilter{sets.NewString(commonNames...)} +} + +func (f *RequestCommonNameFilter) Match(csr *certapiv1.CertificateSigningRequest) bool { + x509CSR, err := x509.ParseCertificateRequest(csr.Spec.Request) + if err != nil { + klog.V(4).Infof("failed to parse the CSR .spec.request of %q: %v", csr.Name, err) + return false + } + + return f.commonNames.Has(x509CSR.Subject.CommonName) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/csr/simple_clientcert_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/csr/simple_clientcert_controller.go new file mode 100644 index 0000000000..af78eba126 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/csr/simple_clientcert_controller.go @@ -0,0 +1,50 @@ +package csr + +import ( + "crypto/x509/pkix" + "fmt" + + 
"k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + + "github.com/openshift/library-go/pkg/controller/factory" + "github.com/openshift/library-go/pkg/operator/events" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// NewSimpleClientCertificateController creates a controller that keeps a secret up to date with a client-cert +// valid against the kube-apiserver. This version only works in a single cluster. The base library allows +// the secret in one cluster and the CSR in another. +func NewSimpleClientCertificateController( + secretNamespace, secretName string, + commonName string, groups []string, + kubeInformers informers.SharedInformerFactory, + kubeClient kubernetes.Interface, + recorder events.Recorder, +) factory.Controller { + certOptions := ClientCertOption{ + SecretNamespace: secretNamespace, + SecretName: secretName, + } + csrOptions := CSROption{ + ObjectMeta: metav1.ObjectMeta{}, + Subject: &pkix.Name{ + Organization: groups, + CommonName: commonName, + }, + SignerName: "kubernetes.io/kube-apiserver-client", + EventFilterFunc: nil, + } + controllerName := fmt.Sprintf("client-cert-%s[%s]", secretName, secretNamespace) + + return NewClientCertificateController( + certOptions, + csrOptions, + kubeInformers.Certificates().V1().CertificateSigningRequests(), + kubeClient.CertificatesV1().CertificateSigningRequests(), + kubeInformers.Core().V1().Secrets(), + kubeClient.CoreV1(), + recorder, + controllerName, + ) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/events/OWNERS b/vendor/github.com/openshift/library-go/pkg/operator/events/OWNERS new file mode 100644 index 0000000000..4f189b7087 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/events/OWNERS @@ -0,0 +1,8 @@ +reviewers: + - mfojtik + - deads2k + - sttts +approvers: + - mfojtik + - deads2k + - sttts diff --git a/vendor/github.com/openshift/library-go/pkg/operator/events/recorder.go b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder.go new file mode 100644 index 0000000000..68b2b005c1 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder.go @@ -0,0 +1,219 @@ +package events + +import ( + "context" + "errors" + "fmt" + "os" + "time" + + "k8s.io/client-go/kubernetes" + "k8s.io/klog/v2" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" +) + +// Recorder is a simple event recording interface. +type Recorder interface { + Event(reason, message string) + Eventf(reason, messageFmt string, args ...interface{}) + Warning(reason, message string) + Warningf(reason, messageFmt string, args ...interface{}) + + // ForComponent allows to fiddle the component name before sending the event to sink. + // Making more unique components will prevent the spam filter in upstream event sink from dropping + // events. + ForComponent(componentName string) Recorder + + // WithComponentSuffix is similar to ForComponent except it just suffix the current component name instead of overriding. + WithComponentSuffix(componentNameSuffix string) Recorder + + // ComponentName returns the current source component name for the event. + // This allows to suffix the original component name with 'sub-component'. + ComponentName() string + + Shutdown() +} + +// podNameEnv is a name of environment variable inside container that specifies the name of the current replica set. +// This replica set name is then used as a source/involved object for operator events. 
+const podNameEnv = "POD_NAME"
+
+// podNameEnvFunc allows overriding the way we get the environment variable value (for unit tests).
+var podNameEnvFunc = func() string {
+	return os.Getenv(podNameEnv)
+}
+
+// GetControllerReferenceForCurrentPod provides an object reference to a controller managing the pod/container where this process runs.
+// The pod name must be provided via the POD_NAME environment variable.
+// Even if this method returns an error, it always returns a valid reference to the namespace. It allows the callers to control the logging
+// and decide to fail or accept the namespace.
+func GetControllerReferenceForCurrentPod(client kubernetes.Interface, targetNamespace string, reference *corev1.ObjectReference) (*corev1.ObjectReference, error) {
+	if reference == nil {
+		// Try to get the pod name via the POD_NAME environment variable
+		reference := &corev1.ObjectReference{Kind: "Pod", Name: podNameEnvFunc(), Namespace: targetNamespace}
+		if len(reference.Name) != 0 {
+			return GetControllerReferenceForCurrentPod(client, targetNamespace, reference)
+		}
+		// If that fails, let's try to guess the pod by listing all pods in the namespace and using the first pod in the list
+		reference, err := guessControllerReferenceForNamespace(client.CoreV1().Pods(targetNamespace))
+		if err != nil {
+			// If this fails, do not give up with an error but instead use the namespace as the controller reference for the pod
+			// NOTE: This is a last resort; if we see this often it might indicate something is wrong in the cluster.
+			// In some cases this might help with flakes.
+			return getControllerReferenceForNamespace(targetNamespace), err
+		}
+		return GetControllerReferenceForCurrentPod(client, targetNamespace, reference)
+	}
+
+	switch reference.Kind {
+	case "Pod":
+		pod, err := client.CoreV1().Pods(reference.Namespace).Get(context.TODO(), reference.Name, metav1.GetOptions{})
+		if err != nil {
+			return getControllerReferenceForNamespace(reference.Namespace), err
+		}
+		if podController := metav1.GetControllerOf(pod); podController != nil {
+			return GetControllerReferenceForCurrentPod(client, targetNamespace, makeObjectReference(podController, targetNamespace))
+		}
+		// This is a bare pod without any ownerReference
+		return makeObjectReference(&metav1.OwnerReference{Kind: "Pod", Name: pod.Name, UID: pod.UID, APIVersion: "v1"}, pod.Namespace), nil
+	case "ReplicaSet":
+		rs, err := client.AppsV1().ReplicaSets(reference.Namespace).Get(context.TODO(), reference.Name, metav1.GetOptions{})
+		if err != nil {
+			return getControllerReferenceForNamespace(reference.Namespace), err
+		}
+		if rsController := metav1.GetControllerOf(rs); rsController != nil {
+			return GetControllerReferenceForCurrentPod(client, targetNamespace, makeObjectReference(rsController, targetNamespace))
+		}
+		// This is a replicaSet without any ownerReference
+		return reference, nil
+	default:
+		return reference, nil
+	}
+}
+
+// getControllerReferenceForNamespace returns an object reference to the given namespace.
+func getControllerReferenceForNamespace(targetNamespace string) *corev1.ObjectReference { + return &corev1.ObjectReference{ + Kind: "Namespace", + Namespace: targetNamespace, + Name: targetNamespace, + APIVersion: "v1", + } +} + +// makeObjectReference makes object reference from ownerReference and target namespace +func makeObjectReference(owner *metav1.OwnerReference, targetNamespace string) *corev1.ObjectReference { + return &corev1.ObjectReference{ + Kind: owner.Kind, + Namespace: targetNamespace, + Name: owner.Name, + UID: owner.UID, + APIVersion: owner.APIVersion, + } +} + +// guessControllerReferenceForNamespace tries to guess what resource to reference. +func guessControllerReferenceForNamespace(client corev1client.PodInterface) (*corev1.ObjectReference, error) { + pods, err := client.List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return nil, err + } + if len(pods.Items) == 0 { + return nil, fmt.Errorf("unable to setup event recorder as %q env variable is not set and there are no pods", podNameEnv) + } + + for _, pod := range pods.Items { + ownerRef := metav1.GetControllerOf(&pod) + if ownerRef == nil { + continue + } + return &corev1.ObjectReference{ + Kind: ownerRef.Kind, + Namespace: pod.Namespace, + Name: ownerRef.Name, + UID: ownerRef.UID, + APIVersion: ownerRef.APIVersion, + }, nil + } + return nil, errors.New("can't guess controller ref") +} + +// NewRecorder returns new event recorder. +func NewRecorder(client corev1client.EventInterface, sourceComponentName string, involvedObjectRef *corev1.ObjectReference) Recorder { + return &recorder{ + eventClient: client, + involvedObjectRef: involvedObjectRef, + sourceComponent: sourceComponentName, + } +} + +// recorder is an implementation of Recorder interface. +type recorder struct { + eventClient corev1client.EventInterface + involvedObjectRef *corev1.ObjectReference + sourceComponent string +} + +func (r *recorder) ComponentName() string { + return r.sourceComponent +} + +func (r *recorder) Shutdown() {} + +func (r *recorder) ForComponent(componentName string) Recorder { + newRecorderForComponent := *r + newRecorderForComponent.sourceComponent = componentName + return &newRecorderForComponent +} + +func (r *recorder) WithComponentSuffix(suffix string) Recorder { + return r.ForComponent(fmt.Sprintf("%s-%s", r.ComponentName(), suffix)) +} + +// Event emits the normal type event and allow formatting of message. +func (r *recorder) Eventf(reason, messageFmt string, args ...interface{}) { + r.Event(reason, fmt.Sprintf(messageFmt, args...)) +} + +// Warning emits the warning type event and allow formatting of message. +func (r *recorder) Warningf(reason, messageFmt string, args ...interface{}) { + r.Warning(reason, fmt.Sprintf(messageFmt, args...)) +} + +// Event emits the normal type event. +func (r *recorder) Event(reason, message string) { + event := makeEvent(r.involvedObjectRef, r.sourceComponent, corev1.EventTypeNormal, reason, message) + if _, err := r.eventClient.Create(context.TODO(), event, metav1.CreateOptions{}); err != nil { + klog.Warningf("Error creating event %+v: %v", event, err) + } +} + +// Warning emits the warning type event. 
+func (r *recorder) Warning(reason, message string) { + event := makeEvent(r.involvedObjectRef, r.sourceComponent, corev1.EventTypeWarning, reason, message) + if _, err := r.eventClient.Create(context.TODO(), event, metav1.CreateOptions{}); err != nil { + klog.Warningf("Error creating event %+v: %v", event, err) + } +} + +func makeEvent(involvedObjRef *corev1.ObjectReference, sourceComponent string, eventType, reason, message string) *corev1.Event { + currentTime := metav1.Time{Time: time.Now()} + event := &corev1.Event{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%v.%x", involvedObjRef.Name, currentTime.UnixNano()), + Namespace: involvedObjRef.Namespace, + }, + InvolvedObject: *involvedObjRef, + Reason: reason, + Message: message, + Type: eventType, + Count: 1, + FirstTimestamp: currentTime, + LastTimestamp: currentTime, + } + event.Source.Component = sourceComponent + return event +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_in_memory.go b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_in_memory.go new file mode 100644 index 0000000000..7c6856c350 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_in_memory.go @@ -0,0 +1,79 @@ +package events + +import ( + "fmt" + "sync" + + corev1 "k8s.io/api/core/v1" + "k8s.io/klog/v2" +) + +type inMemoryEventRecorder struct { + events []*corev1.Event + source string + sync.Mutex +} + +// inMemoryDummyObjectReference is used for fake events. +var inMemoryDummyObjectReference = corev1.ObjectReference{ + Kind: "Pod", + Namespace: "dummy", + Name: "dummy", + APIVersion: "v1", +} + +type InMemoryRecorder interface { + Events() []*corev1.Event + Recorder +} + +// NewInMemoryRecorder provides event recorder that stores all events recorded in memory and allow to replay them using the Events() method. +// This recorder should be only used in unit tests. 
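+// A test might assert on the recorded events like this (sketch; t is an assumed *testing.T):
+//
+//	recorder := NewInMemoryRecorder("test")
+//	recorder.Event("SomethingHappened", "details")
+//	if got := len(recorder.Events()); got != 1 {
+//		t.Fatalf("expected exactly one recorded event, got %d", got)
+//	}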
+func NewInMemoryRecorder(sourceComponent string) InMemoryRecorder { + return &inMemoryEventRecorder{events: []*corev1.Event{}, source: sourceComponent} +} + +func (r *inMemoryEventRecorder) ComponentName() string { + return r.source +} + +func (r *inMemoryEventRecorder) Shutdown() {} + +func (r *inMemoryEventRecorder) ForComponent(component string) Recorder { + r.Lock() + defer r.Unlock() + r.source = component + return r +} + +func (r *inMemoryEventRecorder) WithComponentSuffix(suffix string) Recorder { + return r.ForComponent(fmt.Sprintf("%s-%s", r.ComponentName(), suffix)) +} + +// Events returns list of recorded events +func (r *inMemoryEventRecorder) Events() []*corev1.Event { + return r.events +} + +func (r *inMemoryEventRecorder) Event(reason, message string) { + r.Lock() + defer r.Unlock() + event := makeEvent(&inMemoryDummyObjectReference, r.source, corev1.EventTypeNormal, reason, message) + r.events = append(r.events, event) +} + +func (r *inMemoryEventRecorder) Eventf(reason, messageFmt string, args ...interface{}) { + r.Event(reason, fmt.Sprintf(messageFmt, args...)) +} + +func (r *inMemoryEventRecorder) Warning(reason, message string) { + r.Lock() + defer r.Unlock() + event := makeEvent(&inMemoryDummyObjectReference, r.source, corev1.EventTypeWarning, reason, message) + klog.Info(event.String()) + r.events = append(r.events, event) +} + +func (r *inMemoryEventRecorder) Warningf(reason, messageFmt string, args ...interface{}) { + r.Warning(reason, fmt.Sprintf(messageFmt, args...)) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_logging.go b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_logging.go new file mode 100644 index 0000000000..24796c807c --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_logging.go @@ -0,0 +1,51 @@ +package events + +import ( + "fmt" + + corev1 "k8s.io/api/core/v1" + "k8s.io/klog/v2" +) + +type LoggingEventRecorder struct { + component string +} + +// NewLoggingEventRecorder provides event recorder that will log all recorded events via klog. 
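+// This is handy before an event sink exists, e.g. during bootstrapping (sketch):
+//
+//	recorder := NewLoggingEventRecorder("bootstrap")
+//	recorder.Warning("NoEventSink", "kube client not available yet, logging events only")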
+func NewLoggingEventRecorder(component string) Recorder {
+	return &LoggingEventRecorder{component: component}
+}
+
+func (r *LoggingEventRecorder) ComponentName() string {
+	return r.component
+}
+
+func (r *LoggingEventRecorder) ForComponent(component string) Recorder {
+	newRecorder := *r
+	newRecorder.component = component
+	return &newRecorder
+}
+
+func (r *LoggingEventRecorder) Shutdown() {}
+
+func (r *LoggingEventRecorder) WithComponentSuffix(suffix string) Recorder {
+	return r.ForComponent(fmt.Sprintf("%s-%s", r.ComponentName(), suffix))
+}
+
+func (r *LoggingEventRecorder) Event(reason, message string) {
+	event := makeEvent(&inMemoryDummyObjectReference, "", corev1.EventTypeNormal, reason, message)
+	klog.Info(event.String())
+}
+
+func (r *LoggingEventRecorder) Eventf(reason, messageFmt string, args ...interface{}) {
+	r.Event(reason, fmt.Sprintf(messageFmt, args...))
+}
+
+func (r *LoggingEventRecorder) Warning(reason, message string) {
+	event := makeEvent(&inMemoryDummyObjectReference, "", corev1.EventTypeWarning, reason, message)
+	klog.Warning(event.String())
+}
+
+func (r *LoggingEventRecorder) Warningf(reason, messageFmt string, args ...interface{}) {
+	r.Warning(reason, fmt.Sprintf(messageFmt, args...))
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_upstream.go b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_upstream.go
new file mode 100644
index 0000000000..39f9eb5962
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_upstream.go
@@ -0,0 +1,166 @@
+package events
+
+import (
+	"fmt"
+	"strings"
+	"sync"
+
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/client-go/kubernetes/scheme"
+	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
+	"k8s.io/client-go/tools/record"
+	"k8s.io/component-base/metrics"
+	"k8s.io/component-base/metrics/legacyregistry"
+	"k8s.io/klog/v2"
+)
+
+// NewKubeRecorderWithOptions returns a new event recorder with tweaked correlator options.
+func NewKubeRecorderWithOptions(client corev1client.EventInterface, options record.CorrelatorOptions, sourceComponentName string, involvedObjectRef *corev1.ObjectReference) Recorder {
+	return (&upstreamRecorder{
+		client:            client,
+		component:         sourceComponentName,
+		involvedObjectRef: involvedObjectRef,
+		options:           options,
+		fallbackRecorder:  NewRecorder(client, sourceComponentName, involvedObjectRef),
+	}).ForComponent(sourceComponentName)
+}
+
+// NewKubeRecorder returns a new event recorder with default correlator options.
+func NewKubeRecorder(client corev1client.EventInterface, sourceComponentName string, involvedObjectRef *corev1.ObjectReference) Recorder {
+	return NewKubeRecorderWithOptions(client, record.CorrelatorOptions{}, sourceComponentName, involvedObjectRef)
+}
+
+// upstreamRecorder is an implementation of the Recorder interface.
+type upstreamRecorder struct {
+	client            corev1client.EventInterface
+	component         string
+	broadcaster       record.EventBroadcaster
+	eventRecorder     record.EventRecorder
+	involvedObjectRef *corev1.ObjectReference
+	options           record.CorrelatorOptions
+
+	// shuttingDown indicates that the broadcaster for this recorder is being shut down
+	shuttingDown  bool
+	shutdownMutex sync.RWMutex
+
+	// fallbackRecorder is used when the kube recorder is shutting down;
+	// in that case we create the events directly.
+	fallbackRecorder Recorder
+}
+
+// RecommendedClusterSingletonCorrelatorOptions provides recommended event correlator options for components that produce
+// many events (like operators).
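+// A sketch of combining these options with NewKubeRecorderWithOptions (client,
+// namespace, and ref are assumptions of the example):
+//
+//	recorder := NewKubeRecorderWithOptions(
+//		client.CoreV1().Events(namespace),
+//		RecommendedClusterSingletonCorrelatorOptions(),
+//		"my-operator",
+//		ref,
+//	)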
+func RecommendedClusterSingletonCorrelatorOptions() record.CorrelatorOptions {
+	return record.CorrelatorOptions{
+		BurstSize: 60,      // default: 25 (change allows a single source to send 60 events about an object per minute)
+		QPS:       1. / 1., // default: 1/300 (change allows a refill rate of 1 new event every 1s)
+		KeyFunc: func(event *corev1.Event) (aggregateKey string, localKey string) {
+			return strings.Join([]string{
+				event.Source.Component,
+				event.Source.Host,
+				event.InvolvedObject.Kind,
+				event.InvolvedObject.Namespace,
+				event.InvolvedObject.Name,
+				string(event.InvolvedObject.UID),
+				event.InvolvedObject.APIVersion,
+				event.Type,
+				event.Reason,
+				// By default, the KeyFunc does not include the message in the aggregation key. That causes events
+				// with different messages but the same reason to be collapsed as "similar events"; including the
+				// message here prevents them from being lost.
+				event.Message,
+			}, ""), event.Message
+		},
+	}
+}
+
+var eventsCounterMetric = metrics.NewCounterVec(&metrics.CounterOpts{
+	Subsystem:      "event_recorder",
+	Name:           "total_events_count",
+	Help:           "Total count of events processed by this event recorder per involved object",
+	StabilityLevel: metrics.ALPHA,
+}, []string{"severity"})
+
+func init() {
+	(&sync.Once{}).Do(func() {
+		legacyregistry.MustRegister(eventsCounterMetric)
+	})
+}
+
+func (r *upstreamRecorder) ForComponent(componentName string) Recorder {
+	newRecorderForComponent := upstreamRecorder{
+		client:            r.client,
+		fallbackRecorder:  r.fallbackRecorder.WithComponentSuffix(componentName),
+		options:           r.options,
+		involvedObjectRef: r.involvedObjectRef,
+		shuttingDown:      r.shuttingDown,
+	}
+
+	// tweak the event correlator, so we don't lose important events.
+	broadcaster := record.NewBroadcasterWithCorrelatorOptions(r.options)
+	broadcaster.StartLogging(klog.Infof)
+	broadcaster.StartRecordingToSink(&corev1client.EventSinkImpl{Interface: newRecorderForComponent.client})
+
+	newRecorderForComponent.eventRecorder = broadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: componentName})
+	newRecorderForComponent.broadcaster = broadcaster
+	newRecorderForComponent.component = componentName
+
+	return &newRecorderForComponent
+}
+
+func (r *upstreamRecorder) Shutdown() {
+	r.shutdownMutex.Lock()
+	r.shuttingDown = true
+	r.shutdownMutex.Unlock()
+	// Wait for the broadcaster to flush events (this is blocking).
+	// TODO: There is still a race condition upstream that might cause a panic() for events recorded after Shutdown
+	// is called, as event recording is non-blocking (goroutine based).
+	r.broadcaster.Shutdown()
+}
+
+func (r *upstreamRecorder) WithComponentSuffix(suffix string) Recorder {
+	return r.ForComponent(fmt.Sprintf("%s-%s", r.ComponentName(), suffix))
+}
+
+func (r *upstreamRecorder) ComponentName() string {
+	return r.component
+}
+
+// Eventf emits a normal type event and allows formatting of the message.
+func (r *upstreamRecorder) Eventf(reason, messageFmt string, args ...interface{}) {
+	r.Event(reason, fmt.Sprintf(messageFmt, args...))
+}
+
+// Warningf emits a warning type event and allows formatting of the message.
+func (r *upstreamRecorder) Warningf(reason, messageFmt string, args ...interface{}) {
+	r.Warning(reason, fmt.Sprintf(messageFmt, args...))
+}
+
+func (r *upstreamRecorder) incrementEventsCounter(severity string) {
+	if r.involvedObjectRef == nil {
+		return
+	}
+	eventsCounterMetric.WithLabelValues(severity).Inc()
+}
+
+// Event emits the normal type event.
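+// While the recorder is shutting down, the event is created directly through the
+// fallback recorder instead of the (already stopped) broadcaster.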
+func (r *upstreamRecorder) Event(reason, message string) {
+	r.shutdownMutex.RLock()
+	defer r.shutdownMutex.RUnlock()
+	defer r.incrementEventsCounter(corev1.EventTypeNormal)
+	if r.shuttingDown {
+		r.fallbackRecorder.Event(reason, message)
+		return
+	}
+	r.eventRecorder.Event(r.involvedObjectRef, corev1.EventTypeNormal, reason, message)
+}
+
+// Warning emits the warning type event.
+func (r *upstreamRecorder) Warning(reason, message string) {
+	r.shutdownMutex.RLock()
+	defer r.shutdownMutex.RUnlock()
+	defer r.incrementEventsCounter(corev1.EventTypeWarning)
+	if r.shuttingDown {
+		r.fallbackRecorder.Warning(reason, message)
+		return
+	}
+	r.eventRecorder.Event(r.involvedObjectRef, corev1.EventTypeWarning, reason, message)
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/management/management_state.go b/vendor/github.com/openshift/library-go/pkg/operator/management/management_state.go
new file mode 100644
index 0000000000..294770f3e0
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/management/management_state.go
@@ -0,0 +1,77 @@
+package management
+
+import (
+	v1 "github.com/openshift/api/operator/v1"
+)
+
+var (
+	allowOperatorUnmanagedState = true
+	allowOperatorRemovedState   = true
+)
+
+// SetOperatorAlwaysManaged is a one-time choice made when an operator wants to opt out of supporting the "unmanaged"
+// state. This is the case for control-plane operators, or for operators that must always run because otherwise the
+// cluster becomes unstable or critical components stop working.
+func SetOperatorAlwaysManaged() {
+	allowOperatorUnmanagedState = false
+}
+
+// SetOperatorUnmanageable is a one-time choice made when an operator wants to support the "unmanaged" state.
+// This is the default setting, provided here mostly for unit tests.
+func SetOperatorUnmanageable() {
+	allowOperatorUnmanagedState = true
+}
+
+// SetOperatorNotRemovable is a one-time choice the operator author can make to indicate that the operator does not
+// support removal of its operand. This makes sense for operators like kube-apiserver, where removing the operand
+// leaves the cluster in a bricked state that cannot recover automatically.
+func SetOperatorNotRemovable() {
+	allowOperatorRemovedState = false
+}
+
+// SetOperatorRemovable is a one-time choice the operator author can make to indicate that the operator supports
+// removal of its operand.
+// This is the default setting, provided here mostly for unit tests.
+func SetOperatorRemovable() {
+	allowOperatorRemovedState = true
+}
+
+// IsOperatorAlwaysManaged means the operator can't be set to the unmanaged state.
+func IsOperatorAlwaysManaged() bool {
+	return !allowOperatorUnmanagedState
+}
+
+// IsOperatorNotRemovable means the operator can't be set to the removed state.
+func IsOperatorNotRemovable() bool {
+	return !allowOperatorRemovedState
+}
+
+// IsOperatorRemovable means the operator can be set to the removed state.
+func IsOperatorRemovable() bool {
+	return allowOperatorRemovedState
+}
+
+func IsOperatorUnknownState(state v1.ManagementState) bool {
+	switch state {
+	case v1.Managed, v1.Removed, v1.Unmanaged:
+		return false
+	default:
+		return true
+	}
+}
+
+// IsOperatorManaged indicates whether the operator management state allows the control loop to proceed and manage the operand.
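+//
+// A typical sync-loop guard, as an illustrative sketch (variable names are hypothetical):
+//
+//	if !management.IsOperatorManaged(operatorSpec.ManagementState) {
+//		return nil // skip reconciliation while Unmanaged or Removed
+//	}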
+func IsOperatorManaged(state v1.ManagementState) bool {
+	if IsOperatorAlwaysManaged() || IsOperatorNotRemovable() {
+		return true
+	}
+	switch state {
+	case v1.Managed:
+		return true
+	case v1.Removed:
+		return false
+	case v1.Unmanaged:
+		return false
+	}
+	return true
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/args.go b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/args.go
new file mode 100644
index 0000000000..e1a165e63f
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/args.go
@@ -0,0 +1,61 @@
+package v1helpers
+
+import (
+	"fmt"
+	"sort"
+
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+)
+
+// FlagsFromUnstructured processes the unstructured arguments, usually retrieved from an operator's configuration file under a specific key.
+// There are only two supported/valid types for arguments, that is []string and/or string.
+// Passing a different type yields an error.
+//
+// Use the ToFlagSlice function to get a slice of string flags.
+func FlagsFromUnstructured(unstructuredArgs map[string]interface{}) (map[string][]string, error) {
+	return flagsFromUnstructured(unstructuredArgs)
+}
+
+// ToFlagSlice transforms the provided arguments to a slice of string flags.
+// A flag name is taken directly from the key and the value is simply attached.
+// A flag is repeated iff it has more than one value.
+func ToFlagSlice(args map[string][]string) []string {
+	var keys []string
+	for key := range args {
+		keys = append(keys, key)
+	}
+	sort.Strings(keys)
+
+	var flags []string
+	for _, key := range keys {
+		for _, token := range args[key] {
+			flags = append(flags, fmt.Sprintf("--%s=%s", key, token))
+		}
+	}
+	return flags
+}
+
+// flagsFromUnstructured processes the unstructured arguments (interface{}) to a map of strings.
+// There are only two supported/valid types for arguments, that is []string and/or string.
+// Passing a different type yields an error.
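+//
+// For example (an illustrative sketch, not upstream documentation), the unstructured map
+//
+//	{"v": "2", "feature-gates": ["APIPriorityAndFairness=true"]}
+//
+// is returned as
+//
+//	{"v": ["2"], "feature-gates": ["APIPriorityAndFairness=true"]}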
+func flagsFromUnstructured(unstructuredArgs map[string]interface{}) (map[string][]string, error) { + ret := map[string][]string{} + for argName, argRawValue := range unstructuredArgs { + var argsSlice []string + var found bool + var err error + + argsSlice, found, err = unstructured.NestedStringSlice(unstructuredArgs, argName) + if !found || err != nil { + str, found, err := unstructured.NestedString(unstructuredArgs, argName) + if !found || err != nil { + return nil, fmt.Errorf("unable to process an argument, incorrect value %v under %v key, expected []string or string", argRawValue, argName) + } + argsSlice = append(argsSlice, str) + } + + ret[argName] = argsSlice + } + + return ret, nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/core_getters.go b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/core_getters.go new file mode 100644 index 0000000000..bdfe17d92a --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/core_getters.go @@ -0,0 +1,127 @@ +package v1helpers + +import ( + "context" + "fmt" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + corev1listers "k8s.io/client-go/listers/core/v1" +) + +var ( + emptyGetOptions = metav1.GetOptions{} + emptyListOptions = metav1.ListOptions{} +) + +type combinedConfigMapGetter struct { + client corev1client.ConfigMapsGetter + listers KubeInformersForNamespaces +} + +func CachedConfigMapGetter(client corev1client.ConfigMapsGetter, listers KubeInformersForNamespaces) corev1client.ConfigMapsGetter { + return &combinedConfigMapGetter{ + client: client, + listers: listers, + } +} + +type combinedConfigMapInterface struct { + corev1client.ConfigMapInterface + lister corev1listers.ConfigMapNamespaceLister + namespace string +} + +func (g combinedConfigMapGetter) ConfigMaps(namespace string) corev1client.ConfigMapInterface { + return combinedConfigMapInterface{ + ConfigMapInterface: g.client.ConfigMaps(namespace), + lister: g.listers.InformersFor(namespace).Core().V1().ConfigMaps().Lister().ConfigMaps(namespace), + namespace: namespace, + } +} + +func (g combinedConfigMapInterface) Get(_ context.Context, name string, options metav1.GetOptions) (*corev1.ConfigMap, error) { + if !equality.Semantic.DeepEqual(options, emptyGetOptions) { + return nil, fmt.Errorf("GetOptions are not honored by cached client: %#v", options) + } + + ret, err := g.lister.Get(name) + if err != nil { + return nil, err + } + return ret.DeepCopy(), nil +} +func (g combinedConfigMapInterface) List(_ context.Context, options metav1.ListOptions) (*corev1.ConfigMapList, error) { + if !equality.Semantic.DeepEqual(options, emptyListOptions) { + return nil, fmt.Errorf("ListOptions are not honored by cached client: %#v", options) + } + + list, err := g.lister.List(labels.Everything()) + if err != nil { + return nil, err + } + + ret := &corev1.ConfigMapList{} + for i := range list { + ret.Items = append(ret.Items, *(list[i].DeepCopy())) + } + return ret, nil +} + +type combinedSecretGetter struct { + client corev1client.SecretsGetter + listers KubeInformersForNamespaces +} + +func CachedSecretGetter(client corev1client.SecretsGetter, listers KubeInformersForNamespaces) corev1client.SecretsGetter { + return &combinedSecretGetter{ + client: client, + listers: listers, + } +} + +type combinedSecretInterface struct { + corev1client.SecretInterface + 
lister corev1listers.SecretNamespaceLister + namespace string +} + +func (g combinedSecretGetter) Secrets(namespace string) corev1client.SecretInterface { + return combinedSecretInterface{ + SecretInterface: g.client.Secrets(namespace), + lister: g.listers.InformersFor(namespace).Core().V1().Secrets().Lister().Secrets(namespace), + namespace: namespace, + } +} + +func (g combinedSecretInterface) Get(_ context.Context, name string, options metav1.GetOptions) (*corev1.Secret, error) { + if !equality.Semantic.DeepEqual(options, emptyGetOptions) { + return nil, fmt.Errorf("GetOptions are not honored by cached client: %#v", options) + } + + ret, err := g.lister.Get(name) + if err != nil { + return nil, err + } + return ret.DeepCopy(), nil +} + +func (g combinedSecretInterface) List(_ context.Context, options metav1.ListOptions) (*corev1.SecretList, error) { + if !equality.Semantic.DeepEqual(options, emptyListOptions) { + return nil, fmt.Errorf("ListOptions are not honored by cached client: %#v", options) + } + + list, err := g.lister.List(labels.Everything()) + if err != nil { + return nil, err + } + + ret := &corev1.SecretList{} + for i := range list { + ret.Items = append(ret.Items, *(list[i].DeepCopy())) + } + return ret, nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/fake_informers.go b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/fake_informers.go new file mode 100644 index 0000000000..8933328978 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/fake_informers.go @@ -0,0 +1,7 @@ +package v1helpers + +import "k8s.io/client-go/informers" + +func NewFakeKubeInformersForNamespaces(informers map[string]informers.SharedInformerFactory) KubeInformersForNamespaces { + return kubeInformersForNamespaces(informers) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/helpers.go b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/helpers.go new file mode 100644 index 0000000000..1a332f7dc7 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/helpers.go @@ -0,0 +1,384 @@ +package v1helpers + +import ( + "errors" + "fmt" + "os" + "sort" + "strings" + "time" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/client-go/util/retry" + + "github.com/ghodss/yaml" + + configv1 "github.com/openshift/api/config/v1" + operatorv1 "github.com/openshift/api/operator/v1" +) + +// SetOperandVersion sets the new version and returns the previous value. 
+func SetOperandVersion(versions *[]configv1.OperandVersion, operandVersion configv1.OperandVersion) string {
+	if versions == nil {
+		versions = &[]configv1.OperandVersion{}
+	}
+	existingVersion := FindOperandVersion(*versions, operandVersion.Name)
+	if existingVersion == nil {
+		*versions = append(*versions, operandVersion)
+		return ""
+	}
+
+	previous := existingVersion.Version
+	existingVersion.Version = operandVersion.Version
+	return previous
+}
+
+func FindOperandVersion(versions []configv1.OperandVersion, name string) *configv1.OperandVersion {
+	if versions == nil {
+		return nil
+	}
+	for i := range versions {
+		if versions[i].Name == name {
+			return &versions[i]
+		}
+	}
+	return nil
+}
+
+func SetOperatorCondition(conditions *[]operatorv1.OperatorCondition, newCondition operatorv1.OperatorCondition) {
+	if conditions == nil {
+		conditions = &[]operatorv1.OperatorCondition{}
+	}
+	existingCondition := FindOperatorCondition(*conditions, newCondition.Type)
+	if existingCondition == nil {
+		newCondition.LastTransitionTime = metav1.NewTime(time.Now())
+		*conditions = append(*conditions, newCondition)
+		return
+	}
+
+	if existingCondition.Status != newCondition.Status {
+		existingCondition.Status = newCondition.Status
+		existingCondition.LastTransitionTime = metav1.NewTime(time.Now())
+	}
+
+	existingCondition.Reason = newCondition.Reason
+	existingCondition.Message = newCondition.Message
+}
+
+func RemoveOperatorCondition(conditions *[]operatorv1.OperatorCondition, conditionType string) {
+	if conditions == nil {
+		conditions = &[]operatorv1.OperatorCondition{}
+	}
+	newConditions := []operatorv1.OperatorCondition{}
+	for _, condition := range *conditions {
+		if condition.Type != conditionType {
+			newConditions = append(newConditions, condition)
+		}
+	}
+
+	*conditions = newConditions
+}
+
+func FindOperatorCondition(conditions []operatorv1.OperatorCondition, conditionType string) *operatorv1.OperatorCondition {
+	for i := range conditions {
+		if conditions[i].Type == conditionType {
+			return &conditions[i]
+		}
+	}
+
+	return nil
+}
+
+func IsOperatorConditionTrue(conditions []operatorv1.OperatorCondition, conditionType string) bool {
+	return IsOperatorConditionPresentAndEqual(conditions, conditionType, operatorv1.ConditionTrue)
+}
+
+func IsOperatorConditionFalse(conditions []operatorv1.OperatorCondition, conditionType string) bool {
+	return IsOperatorConditionPresentAndEqual(conditions, conditionType, operatorv1.ConditionFalse)
+}
+
+func IsOperatorConditionPresentAndEqual(conditions []operatorv1.OperatorCondition, conditionType string, status operatorv1.ConditionStatus) bool {
+	for _, condition := range conditions {
+		if condition.Type == conditionType {
+			return condition.Status == status
+		}
+	}
+	return false
+}
+
+// UpdateOperatorSpecFunc is a func that mutates an operator spec.
+type UpdateOperatorSpecFunc func(spec *operatorv1.OperatorSpec) error
+
+// UpdateSpec applies the update funcs to the oldSpec and tries to update via the client.
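+//
+// A usage sketch (operatorClient and observedConfig are hypothetical placeholders):
+//
+//	_, updated, err := v1helpers.UpdateSpec(operatorClient,
+//		v1helpers.UpdateObservedConfigFn(observedConfig))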
+func UpdateSpec(client OperatorClient, updateFuncs ...UpdateOperatorSpecFunc) (*operatorv1.OperatorSpec, bool, error) {
+	updated := false
+	var operatorSpec *operatorv1.OperatorSpec
+	err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
+		oldSpec, _, resourceVersion, err := client.GetOperatorState()
+		if err != nil {
+			return err
+		}
+
+		newSpec := oldSpec.DeepCopy()
+		for _, update := range updateFuncs {
+			if err := update(newSpec); err != nil {
+				return err
+			}
+		}
+
+		if equality.Semantic.DeepEqual(oldSpec, newSpec) {
+			return nil
+		}
+
+		operatorSpec, _, err = client.UpdateOperatorSpec(resourceVersion, newSpec)
+		updated = err == nil
+		return err
+	})
+
+	return operatorSpec, updated, err
+}
+
+// UpdateObservedConfigFn returns a func to update the observed config.
+func UpdateObservedConfigFn(config map[string]interface{}) UpdateOperatorSpecFunc {
+	return func(oldSpec *operatorv1.OperatorSpec) error {
+		oldSpec.ObservedConfig = runtime.RawExtension{Object: &unstructured.Unstructured{Object: config}}
+		return nil
+	}
+}
+
+// UpdateStatusFunc is a func that mutates an operator status.
+type UpdateStatusFunc func(status *operatorv1.OperatorStatus) error
+
+// UpdateStatus applies the update funcs to the oldStatus and tries to update via the client.
+func UpdateStatus(client OperatorClient, updateFuncs ...UpdateStatusFunc) (*operatorv1.OperatorStatus, bool, error) {
+	updated := false
+	var updatedOperatorStatus *operatorv1.OperatorStatus
+	err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
+		_, oldStatus, resourceVersion, err := client.GetOperatorState()
+		if err != nil {
+			return err
+		}
+
+		newStatus := oldStatus.DeepCopy()
+		for _, update := range updateFuncs {
+			if err := update(newStatus); err != nil {
+				return err
+			}
+		}
+
+		if equality.Semantic.DeepEqual(oldStatus, newStatus) {
+			// We return the newStatus which is a deep copy of oldStatus but with all update funcs applied.
+			updatedOperatorStatus = newStatus
+			return nil
+		}
+
+		updatedOperatorStatus, err = client.UpdateOperatorStatus(resourceVersion, newStatus)
+		updated = err == nil
+		return err
+	})
+
+	return updatedOperatorStatus, updated, err
+}
+
+// UpdateConditionFn returns a func to update a condition.
+func UpdateConditionFn(cond operatorv1.OperatorCondition) UpdateStatusFunc {
+	return func(oldStatus *operatorv1.OperatorStatus) error {
+		SetOperatorCondition(&oldStatus.Conditions, cond)
+		return nil
+	}
+}
+
+// UpdateStaticPodStatusFunc is a func that mutates a static pod operator status.
+type UpdateStaticPodStatusFunc func(status *operatorv1.StaticPodOperatorStatus) error
+
+// UpdateStaticPodStatus applies the update funcs to the oldStatus and tries to update via the client.
+func UpdateStaticPodStatus(client StaticPodOperatorClient, updateFuncs ...UpdateStaticPodStatusFunc) (*operatorv1.StaticPodOperatorStatus, bool, error) {
+	updated := false
+	var updatedOperatorStatus *operatorv1.StaticPodOperatorStatus
+	err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
+		_, oldStatus, resourceVersion, err := client.GetStaticPodOperatorState()
+		if err != nil {
+			return err
+		}
+
+		newStatus := oldStatus.DeepCopy()
+		for _, update := range updateFuncs {
+			if err := update(newStatus); err != nil {
+				return err
+			}
+		}
+
+		if equality.Semantic.DeepEqual(oldStatus, newStatus) {
+			// We return the newStatus which is a deep copy of oldStatus but with all update funcs applied.
+			updatedOperatorStatus = newStatus
+			return nil
+		}
+
+		updatedOperatorStatus, err = client.UpdateStaticPodOperatorStatus(resourceVersion, newStatus)
+		updated = err == nil
+		return err
+	})
+
+	return updatedOperatorStatus, updated, err
+}
+
+// UpdateStaticPodConditionFn returns a func to update a condition.
+func UpdateStaticPodConditionFn(cond operatorv1.OperatorCondition) UpdateStaticPodStatusFunc {
+	return func(oldStatus *operatorv1.StaticPodOperatorStatus) error {
+		SetOperatorCondition(&oldStatus.Conditions, cond)
+		return nil
+	}
+}
+
+// EnsureFinalizer adds a new finalizer to the operator CR, if it does not exist. No-op otherwise.
+// The finalizer name is computed from the controller name and operator name ($OPERATOR_NAME or os.Args[0]).
+// It retries on conflicts.
+func EnsureFinalizer(client OperatorClientWithFinalizers, controllerName string) error {
+	finalizer := getFinalizerName(controllerName)
+	err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
+		return client.EnsureFinalizer(finalizer)
+	})
+	return err
+}
+
+// RemoveFinalizer removes a finalizer from the operator CR, if it is there. No-op otherwise.
+// The finalizer name is computed from the controller name and operator name ($OPERATOR_NAME or os.Args[0]).
+// It retries on conflicts.
+func RemoveFinalizer(client OperatorClientWithFinalizers, controllerName string) error {
+	finalizer := getFinalizerName(controllerName)
+	err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
+		return client.RemoveFinalizer(finalizer)
+	})
+	return err
+}
+
+// getFinalizerName computes a nice finalizer name from controllerName and the operator name ($OPERATOR_NAME or os.Args[0]).
+func getFinalizerName(controllerName string) string {
+	return fmt.Sprintf("%s.operator.openshift.io/%s", getOperatorName(), controllerName)
+}
+
+func getOperatorName() string {
+	if name := os.Getenv("OPERATOR_NAME"); name != "" {
+		return name
+	}
+	return os.Args[0]
+}
+
+type aggregate []error
+
+var _ utilerrors.Aggregate = aggregate{}
+
+// NewMultiLineAggregate returns an aggregate error with multi-line output
+func NewMultiLineAggregate(errList []error) error {
+	var errs []error
+	for _, e := range errList {
+		if e != nil {
+			errs = append(errs, e)
+		}
+	}
+	if len(errs) == 0 {
+		return nil
+	}
+	return aggregate(errs)
+}
+
+// Error is part of the error interface.
+func (agg aggregate) Error() string {
+	msgs := make([]string, len(agg))
+	for i := range agg {
+		msgs[i] = agg[i].Error()
+	}
+	return strings.Join(msgs, "\n")
+}
+
+// Errors is part of the Aggregate interface.
+func (agg aggregate) Errors() []error { + return []error(agg) +} + +// Is is part of the Aggregate interface +func (agg aggregate) Is(target error) bool { + return agg.visit(func(err error) bool { + return errors.Is(err, target) + }) +} + +func (agg aggregate) visit(f func(err error) bool) bool { + for _, err := range agg { + switch err := err.(type) { + case aggregate: + if match := err.visit(f); match { + return match + } + case utilerrors.Aggregate: + for _, nestedErr := range err.Errors() { + if match := f(nestedErr); match { + return match + } + } + default: + if match := f(err); match { + return match + } + } + } + + return false +} + +// MapToEnvVars converts a string-string map to a slice of corev1.EnvVar-s +func MapToEnvVars(mapEnvVars map[string]string) []corev1.EnvVar { + if mapEnvVars == nil { + return nil + } + + envVars := make([]corev1.EnvVar, len(mapEnvVars)) + i := 0 + for k, v := range mapEnvVars { + envVars[i] = corev1.EnvVar{Name: k, Value: v} + i++ + } + + // need to sort the slice so that kube-controller-manager-pod configmap does not change all the time + sort.Slice(envVars, func(i, j int) bool { return envVars[i].Name < envVars[j].Name }) + return envVars +} + +// InjectObservedProxyIntoContainers injects proxy environment variables in containers specified in containerNames. +func InjectObservedProxyIntoContainers(podSpec *corev1.PodSpec, containerNames []string, observedConfig []byte, fields ...string) error { + var config map[string]interface{} + if err := yaml.Unmarshal(observedConfig, &config); err != nil { + return fmt.Errorf("failed to unmarshal the observedConfig: %w", err) + } + + proxyConfig, found, err := unstructured.NestedStringMap(config, fields...) + if err != nil { + return fmt.Errorf("couldn't get the proxy config from observedConfig: %w", err) + } + + proxyEnvVars := MapToEnvVars(proxyConfig) + if !found || len(proxyEnvVars) < 1 { + // There's no observed proxy config, we should tolerate that + return nil + } + + for _, containerName := range containerNames { + for i := range podSpec.InitContainers { + if podSpec.InitContainers[i].Name == containerName { + podSpec.InitContainers[i].Env = append(podSpec.InitContainers[i].Env, proxyEnvVars...) + } + } + for i := range podSpec.Containers { + if podSpec.Containers[i].Name == containerName { + podSpec.Containers[i].Env = append(podSpec.Containers[i].Env, proxyEnvVars...) 
+			}
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/informers.go b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/informers.go
new file mode 100644
index 0000000000..ba3769252d
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/informers.go
@@ -0,0 +1,135 @@
+package v1helpers
+
+import (
+	"fmt"
+	"time"
+
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/client-go/informers"
+	"k8s.io/client-go/kubernetes"
+	corev1listers "k8s.io/client-go/listers/core/v1"
+)
+
+// KubeInformersForNamespaces is a simple way to combine several shared informers into a single struct with unified listing power
+type KubeInformersForNamespaces interface {
+	Start(stopCh <-chan struct{})
+	InformersFor(namespace string) informers.SharedInformerFactory
+	Namespaces() sets.String
+
+	ConfigMapLister() corev1listers.ConfigMapLister
+	SecretLister() corev1listers.SecretLister
+
+	// Used by the workloads controller and by controllers that report deployment pod status
+	PodLister() corev1listers.PodLister
+}
+
+var _ KubeInformersForNamespaces = kubeInformersForNamespaces{}
+
+func NewKubeInformersForNamespaces(kubeClient kubernetes.Interface, namespaces ...string) KubeInformersForNamespaces {
+	ret := kubeInformersForNamespaces{}
+	for _, namespace := range namespaces {
+		if len(namespace) == 0 {
+			ret[""] = informers.NewSharedInformerFactory(kubeClient, 10*time.Minute)
+			continue
+		}
+		ret[namespace] = informers.NewSharedInformerFactoryWithOptions(kubeClient, 10*time.Minute, informers.WithNamespace(namespace))
+	}
+
+	return ret
+}
+
+type kubeInformersForNamespaces map[string]informers.SharedInformerFactory
+
+func (i kubeInformersForNamespaces) Start(stopCh <-chan struct{}) {
+	for _, informer := range i {
+		informer.Start(stopCh)
+	}
+}
+
+func (i kubeInformersForNamespaces) Namespaces() sets.String {
+	return sets.StringKeySet(i)
+}
+func (i kubeInformersForNamespaces) InformersFor(namespace string) informers.SharedInformerFactory {
+	return i[namespace]
+}
+
+func (i kubeInformersForNamespaces) HasInformersFor(namespace string) bool {
+	return i.InformersFor(namespace) != nil
+}
+
+type configMapLister kubeInformersForNamespaces
+
+func (i kubeInformersForNamespaces) ConfigMapLister() corev1listers.ConfigMapLister {
+	return configMapLister(i)
+}
+
+func (l configMapLister) List(selector labels.Selector) (ret []*corev1.ConfigMap, err error) {
+	globalInformer, ok := l[""]
+	if !ok {
+		return nil, fmt.Errorf("combinedLister does not support cross namespace list")
+	}
+
+	return globalInformer.Core().V1().ConfigMaps().Lister().List(selector)
+}
+
+func (l configMapLister) ConfigMaps(namespace string) corev1listers.ConfigMapNamespaceLister {
+	informer, ok := l[namespace]
+	if !ok {
+		// coding error
+		panic(fmt.Sprintf("namespace %q is missing", namespace))
+	}
+
+	return informer.Core().V1().ConfigMaps().Lister().ConfigMaps(namespace)
+}
+
+type secretLister kubeInformersForNamespaces
+
+func (i kubeInformersForNamespaces) SecretLister() corev1listers.SecretLister {
+	return secretLister(i)
+}
+
+func (l secretLister) List(selector labels.Selector) (ret []*corev1.Secret, err error) {
+	globalInformer, ok := l[""]
+	if !ok {
+		return nil, fmt.Errorf("combinedLister does not support cross namespace list")
+	}
+
+	return globalInformer.Core().V1().Secrets().Lister().List(selector)
+}
+
+func (l secretLister) Secrets(namespace string) corev1listers.SecretNamespaceLister {
+	informer, ok := l[namespace]
+	if !ok {
+		// coding error
+		panic(fmt.Sprintf("namespace %q is missing", namespace))
+	}
+
+	return informer.Core().V1().Secrets().Lister().Secrets(namespace)
+}
+
+type podLister kubeInformersForNamespaces
+
+func (i kubeInformersForNamespaces) PodLister() corev1listers.PodLister {
+	return podLister(i)
+}
+
+func (l podLister) List(selector labels.Selector) (ret []*corev1.Pod, err error) {
+	globalInformer, ok := l[""]
+	if !ok {
+		return nil, fmt.Errorf("combinedLister does not support cross namespace list")
+	}
+
+	return globalInformer.Core().V1().Pods().Lister().List(selector)
+}
+
+func (l podLister) Pods(namespace string) corev1listers.PodNamespaceLister {
+	informer, ok := l[namespace]
+	if !ok {
+		// coding error
+		panic(fmt.Sprintf("namespace %q is missing", namespace))
+	}
+
+	return informer.Core().V1().Pods().Lister().Pods(namespace)
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/interfaces.go b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/interfaces.go
new file mode 100644
index 0000000000..f5d60d9cff
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/interfaces.go
@@ -0,0 +1,41 @@
+package v1helpers
+
+import (
+	operatorv1 "github.com/openshift/api/operator/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/tools/cache"
+)
+
+type OperatorClient interface {
+	Informer() cache.SharedIndexInformer
+	// GetObjectMeta returns the operator metadata.
+	GetObjectMeta() (meta *metav1.ObjectMeta, err error)
+	// GetOperatorState returns the operator spec, status and the resource version, potentially from a lister.
+	GetOperatorState() (spec *operatorv1.OperatorSpec, status *operatorv1.OperatorStatus, resourceVersion string, err error)
+	// UpdateOperatorSpec updates the spec of the operator, assuming the given resource version.
+	UpdateOperatorSpec(oldResourceVersion string, in *operatorv1.OperatorSpec) (out *operatorv1.OperatorSpec, newResourceVersion string, err error)
+	// UpdateOperatorStatus updates the status of the operator, assuming the given resource version.
+	UpdateOperatorStatus(oldResourceVersion string, in *operatorv1.OperatorStatus) (out *operatorv1.OperatorStatus, err error)
+}
+
+type StaticPodOperatorClient interface {
+	OperatorClient
+	// GetStaticPodOperatorState returns the static pod operator spec, status and the resource version,
+	// potentially from a lister.
+	GetStaticPodOperatorState() (spec *operatorv1.StaticPodOperatorSpec, status *operatorv1.StaticPodOperatorStatus, resourceVersion string, err error)
+	// GetStaticPodOperatorStateWithQuorum returns the static pod operator spec, status and resource version
+	// directly from a server read.
+	GetStaticPodOperatorStateWithQuorum() (spec *operatorv1.StaticPodOperatorSpec, status *operatorv1.StaticPodOperatorStatus, resourceVersion string, err error)
+	// UpdateStaticPodOperatorStatus updates the status, assuming the given resource version.
+	UpdateStaticPodOperatorStatus(resourceVersion string, in *operatorv1.StaticPodOperatorStatus) (out *operatorv1.StaticPodOperatorStatus, err error)
+	// UpdateStaticPodOperatorSpec updates the spec, assuming the given resource version.
+	UpdateStaticPodOperatorSpec(resourceVersion string, in *operatorv1.StaticPodOperatorSpec) (out *operatorv1.StaticPodOperatorSpec, newResourceVersion string, err error)
+}
+
+type OperatorClientWithFinalizers interface {
+	OperatorClient
+	// EnsureFinalizer adds a new finalizer to the operator CR, if it does not exist. No-op otherwise.
+	EnsureFinalizer(finalizer string) error
+	// RemoveFinalizer removes a finalizer from the operator CR, if it is there. No-op otherwise.
+	RemoveFinalizer(finalizer string) error
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/test_helpers.go b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/test_helpers.go
new file mode 100644
index 0000000000..4c3a604c75
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/test_helpers.go
@@ -0,0 +1,288 @@
+package v1helpers
+
+import (
+	"context"
+	"fmt"
+	"strconv"
+	"time"
+
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/client-go/kubernetes"
+	corev1listers "k8s.io/client-go/listers/core/v1"
+	"k8s.io/client-go/tools/cache"
+
+	operatorv1 "github.com/openshift/api/operator/v1"
+)
+
+// NewFakeSharedIndexInformer returns a fake shared index informer, suitable to use in static pod controller unit tests.
+func NewFakeSharedIndexInformer() cache.SharedIndexInformer {
+	return &fakeSharedIndexInformer{}
+}
+
+type fakeSharedIndexInformer struct{}
+
+func (fakeSharedIndexInformer) AddEventHandler(handler cache.ResourceEventHandler) {
+}
+
+func (fakeSharedIndexInformer) AddEventHandlerWithResyncPeriod(handler cache.ResourceEventHandler, resyncPeriod time.Duration) {
+}
+
+func (fakeSharedIndexInformer) GetStore() cache.Store {
+	panic("implement me")
+}
+
+func (fakeSharedIndexInformer) GetController() cache.Controller {
+	panic("implement me")
+}
+
+func (fakeSharedIndexInformer) Run(stopCh <-chan struct{}) {
+	panic("implement me")
+}
+
+func (fakeSharedIndexInformer) HasSynced() bool {
+	return true
+}
+
+func (fakeSharedIndexInformer) LastSyncResourceVersion() string {
+	panic("implement me")
+}
+
+func (fakeSharedIndexInformer) AddIndexers(indexers cache.Indexers) error {
+	panic("implement me")
+}
+
+func (fakeSharedIndexInformer) GetIndexer() cache.Indexer {
+	panic("implement me")
+}
+
+func (fakeSharedIndexInformer) SetWatchErrorHandler(handler cache.WatchErrorHandler) error {
+	panic("implement me")
+}
+
+// NewFakeStaticPodOperatorClient returns a fake operator client suitable to use in static pod controller unit tests.
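+// The triggerStatusErr and triggerSpecErr hooks may be nil; when provided, they are invoked
+// on every status/spec update and any returned error is surfaced to the caller (descriptive
+// note added here, not upstream wording).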
+func NewFakeStaticPodOperatorClient( + staticPodSpec *operatorv1.StaticPodOperatorSpec, staticPodStatus *operatorv1.StaticPodOperatorStatus, + triggerStatusErr func(rv string, status *operatorv1.StaticPodOperatorStatus) error, + triggerSpecErr func(rv string, spec *operatorv1.StaticPodOperatorSpec) error) StaticPodOperatorClient { + return &fakeStaticPodOperatorClient{ + fakeStaticPodOperatorSpec: staticPodSpec, + fakeStaticPodOperatorStatus: staticPodStatus, + resourceVersion: "0", + triggerStatusUpdateError: triggerStatusErr, + triggerSpecUpdateError: triggerSpecErr, + } +} + +type fakeStaticPodOperatorClient struct { + fakeStaticPodOperatorSpec *operatorv1.StaticPodOperatorSpec + fakeStaticPodOperatorStatus *operatorv1.StaticPodOperatorStatus + resourceVersion string + triggerStatusUpdateError func(rv string, status *operatorv1.StaticPodOperatorStatus) error + triggerSpecUpdateError func(rv string, status *operatorv1.StaticPodOperatorSpec) error +} + +func (c *fakeStaticPodOperatorClient) Informer() cache.SharedIndexInformer { + return &fakeSharedIndexInformer{} + +} +func (c *fakeStaticPodOperatorClient) GetObjectMeta() (*metav1.ObjectMeta, error) { + panic("not supported") +} + +func (c *fakeStaticPodOperatorClient) GetStaticPodOperatorState() (*operatorv1.StaticPodOperatorSpec, *operatorv1.StaticPodOperatorStatus, string, error) { + return c.fakeStaticPodOperatorSpec, c.fakeStaticPodOperatorStatus, c.resourceVersion, nil +} + +func (c *fakeStaticPodOperatorClient) GetStaticPodOperatorStateWithQuorum() (*operatorv1.StaticPodOperatorSpec, *operatorv1.StaticPodOperatorStatus, string, error) { + return c.fakeStaticPodOperatorSpec, c.fakeStaticPodOperatorStatus, c.resourceVersion, nil +} + +func (c *fakeStaticPodOperatorClient) UpdateStaticPodOperatorStatus(resourceVersion string, status *operatorv1.StaticPodOperatorStatus) (*operatorv1.StaticPodOperatorStatus, error) { + if c.resourceVersion != resourceVersion { + return nil, errors.NewConflict(schema.GroupResource{Group: operatorv1.GroupName, Resource: "TestOperatorConfig"}, "instance", fmt.Errorf("invalid resourceVersion")) + } + rv, err := strconv.Atoi(resourceVersion) + if err != nil { + return nil, err + } + c.resourceVersion = strconv.Itoa(rv + 1) + if c.triggerStatusUpdateError != nil { + if err := c.triggerStatusUpdateError(resourceVersion, status); err != nil { + return nil, err + } + } + c.fakeStaticPodOperatorStatus = status + return c.fakeStaticPodOperatorStatus, nil +} + +func (c *fakeStaticPodOperatorClient) UpdateStaticPodOperatorSpec(resourceVersion string, spec *operatorv1.StaticPodOperatorSpec) (*operatorv1.StaticPodOperatorSpec, string, error) { + if c.resourceVersion != resourceVersion { + return nil, "", errors.NewConflict(schema.GroupResource{Group: operatorv1.GroupName, Resource: "TestOperatorConfig"}, "instance", fmt.Errorf("invalid resourceVersion")) + } + rv, err := strconv.Atoi(resourceVersion) + if err != nil { + return nil, "", err + } + c.resourceVersion = strconv.Itoa(rv + 1) + if c.triggerSpecUpdateError != nil { + if err := c.triggerSpecUpdateError(resourceVersion, spec); err != nil { + return nil, "", err + } + } + c.fakeStaticPodOperatorSpec = spec + return c.fakeStaticPodOperatorSpec, c.resourceVersion, nil +} + +func (c *fakeStaticPodOperatorClient) GetOperatorState() (*operatorv1.OperatorSpec, *operatorv1.OperatorStatus, string, error) { + return &c.fakeStaticPodOperatorSpec.OperatorSpec, &c.fakeStaticPodOperatorStatus.OperatorStatus, c.resourceVersion, nil +} +func (c *fakeStaticPodOperatorClient) 
UpdateOperatorSpec(string, *operatorv1.OperatorSpec) (spec *operatorv1.OperatorSpec, resourceVersion string, err error) { + panic("not supported") +} +func (c *fakeStaticPodOperatorClient) UpdateOperatorStatus(resourceVersion string, status *operatorv1.OperatorStatus) (*operatorv1.OperatorStatus, error) { + if c.resourceVersion != resourceVersion { + return nil, errors.NewConflict(schema.GroupResource{Group: operatorv1.GroupName, Resource: "TestOperatorConfig"}, "instance", fmt.Errorf("invalid resourceVersion")) + } + rv, err := strconv.Atoi(resourceVersion) + if err != nil { + return nil, err + } + c.resourceVersion = strconv.Itoa(rv + 1) + if c.triggerStatusUpdateError != nil { + staticPodStatus := c.fakeStaticPodOperatorStatus.DeepCopy() + staticPodStatus.OperatorStatus = *status + if err := c.triggerStatusUpdateError(resourceVersion, staticPodStatus); err != nil { + return nil, err + } + } + c.fakeStaticPodOperatorStatus.OperatorStatus = *status + return &c.fakeStaticPodOperatorStatus.OperatorStatus, nil +} + +// NewFakeNodeLister returns a fake node lister suitable to use in node controller unit test +func NewFakeNodeLister(client kubernetes.Interface) corev1listers.NodeLister { + return &fakeNodeLister{client: client} +} + +type fakeNodeLister struct { + client kubernetes.Interface +} + +func (n *fakeNodeLister) List(selector labels.Selector) ([]*corev1.Node, error) { + nodes, err := n.client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{LabelSelector: selector.String()}) + if err != nil { + return nil, err + } + ret := []*corev1.Node{} + for i := range nodes.Items { + ret = append(ret, &nodes.Items[i]) + } + return ret, nil +} + +func (n *fakeNodeLister) Get(name string) (*corev1.Node, error) { + panic("implement me") +} + +// NewFakeOperatorClient returns a fake operator client suitable to use in static pod controller unit tests. 
+func NewFakeOperatorClient(spec *operatorv1.OperatorSpec, status *operatorv1.OperatorStatus, triggerErr func(rv string, status *operatorv1.OperatorStatus) error) OperatorClientWithFinalizers { + return NewFakeOperatorClientWithObjectMeta(nil, spec, status, triggerErr) +} + +func NewFakeOperatorClientWithObjectMeta(meta *metav1.ObjectMeta, spec *operatorv1.OperatorSpec, status *operatorv1.OperatorStatus, triggerErr func(rv string, status *operatorv1.OperatorStatus) error) OperatorClientWithFinalizers { + return &fakeOperatorClient{ + fakeOperatorSpec: spec, + fakeOperatorStatus: status, + fakeObjectMeta: meta, + resourceVersion: "0", + triggerStatusUpdateError: triggerErr, + } +} + +type fakeOperatorClient struct { + fakeOperatorSpec *operatorv1.OperatorSpec + fakeOperatorStatus *operatorv1.OperatorStatus + fakeObjectMeta *metav1.ObjectMeta + resourceVersion string + triggerStatusUpdateError func(rv string, status *operatorv1.OperatorStatus) error +} + +func (c *fakeOperatorClient) Informer() cache.SharedIndexInformer { + return &fakeSharedIndexInformer{} +} + +func (c *fakeOperatorClient) GetObjectMeta() (*metav1.ObjectMeta, error) { + if c.fakeObjectMeta == nil { + return &metav1.ObjectMeta{}, nil + } + + return c.fakeObjectMeta, nil +} + +func (c *fakeOperatorClient) GetOperatorState() (*operatorv1.OperatorSpec, *operatorv1.OperatorStatus, string, error) { + return c.fakeOperatorSpec, c.fakeOperatorStatus, c.resourceVersion, nil +} + +func (c *fakeOperatorClient) UpdateOperatorStatus(resourceVersion string, status *operatorv1.OperatorStatus) (*operatorv1.OperatorStatus, error) { + if c.resourceVersion != resourceVersion { + return nil, errors.NewConflict(schema.GroupResource{Group: operatorv1.GroupName, Resource: "TestOperatorConfig"}, "instance", fmt.Errorf("invalid resourceVersion")) + } + rv, err := strconv.Atoi(resourceVersion) + if err != nil { + return nil, err + } + c.resourceVersion = strconv.Itoa(rv + 1) + if c.triggerStatusUpdateError != nil { + if err := c.triggerStatusUpdateError(resourceVersion, status); err != nil { + return nil, err + } + } + c.fakeOperatorStatus = status + return c.fakeOperatorStatus, nil +} + +func (c *fakeOperatorClient) UpdateOperatorSpec(resourceVersion string, spec *operatorv1.OperatorSpec) (*operatorv1.OperatorSpec, string, error) { + if c.resourceVersion != resourceVersion { + return nil, c.resourceVersion, errors.NewConflict(schema.GroupResource{Group: operatorv1.GroupName, Resource: "TestOperatorConfig"}, "instance", fmt.Errorf("invalid resourceVersion")) + } + rv, err := strconv.Atoi(resourceVersion) + if err != nil { + return nil, c.resourceVersion, err + } + c.resourceVersion = strconv.Itoa(rv + 1) + c.fakeOperatorSpec = spec + return c.fakeOperatorSpec, c.resourceVersion, nil +} + +func (c *fakeOperatorClient) EnsureFinalizer(finalizer string) error { + if c.fakeObjectMeta == nil { + c.fakeObjectMeta = &metav1.ObjectMeta{} + } + for _, f := range c.fakeObjectMeta.Finalizers { + if f == finalizer { + return nil + } + } + c.fakeObjectMeta.Finalizers = append(c.fakeObjectMeta.Finalizers, finalizer) + return nil +} + +func (c *fakeOperatorClient) RemoveFinalizer(finalizer string) error { + newFinalizers := []string{} + for _, f := range c.fakeObjectMeta.Finalizers { + if f == finalizer { + continue + } + newFinalizers = append(newFinalizers, f) + } + c.fakeObjectMeta.Finalizers = newFinalizers + return nil +} + +func (c *fakeOperatorClient) SetObjectMeta(meta *metav1.ObjectMeta) { + c.fakeObjectMeta = meta +} diff --git 
a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go index ba94405af4..58ff13d7f5 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go @@ -398,7 +398,7 @@ func (r *Registry) Unregister(c Collector) bool { func (r *Registry) MustRegister(cs ...Collector) { for _, c := range cs { if err := r.Register(c); err != nil { - panic(err) + //panic(err) } } } diff --git a/vendor/modules.txt b/vendor/modules.txt index 36c81709f7..247761d7e4 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -350,7 +350,7 @@ github.com/golang/protobuf/ptypes/any github.com/golang/protobuf/ptypes/duration github.com/golang/protobuf/ptypes/timestamp github.com/golang/protobuf/ptypes/wrappers -# github.com/google/btree v1.0.0 => github.com/google/btree v1.0.0 +# github.com/google/btree v1.0.1-0.20190326150332-20236160a414 => github.com/google/btree v1.0.0 github.com/google/btree # github.com/google/cadvisor v0.39.0 => github.com/openshift/google-cadvisor v0.33.2-0.20210610135131-57b941c7657a github.com/google/cadvisor/accelerators @@ -642,6 +642,7 @@ github.com/openshift/api/samples github.com/openshift/api/samples/v1 github.com/openshift/api/security github.com/openshift/api/security/v1 +github.com/openshift/api/securityinternal/v1 github.com/openshift/api/servicecertsigner github.com/openshift/api/servicecertsigner/v1alpha1 github.com/openshift/api/template @@ -670,7 +671,7 @@ github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/selinux github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/user github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/util github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/util/sort -# github.com/openshift/build-machinery-go v0.0.0-20210423112049-9415d7ebd33e => github.com/openshift/build-machinery-go v0.0.0-20210209125900-0da259a2c359 +# github.com/openshift/build-machinery-go v0.0.0-20210806203541-4ea9b6da3a37 => github.com/openshift/build-machinery-go v0.0.0-20210209125900-0da259a2c359 ## explicit github.com/openshift/build-machinery-go github.com/openshift/build-machinery-go/make @@ -680,7 +681,7 @@ github.com/openshift/build-machinery-go/make/targets/golang github.com/openshift/build-machinery-go/make/targets/openshift github.com/openshift/build-machinery-go/make/targets/openshift/operator github.com/openshift/build-machinery-go/scripts -# github.com/openshift/client-go v0.0.0-20210521082421-73d9475a9142 => github.com/openshift/client-go v0.0.0-20210422153130-25c8450d1535 +# github.com/openshift/client-go v0.0.0-20210831095141-e19a065e79f7 => github.com/openshift/client-go v0.0.0-20210422153130-25c8450d1535 ## explicit github.com/openshift/client-go/apiserver/clientset/versioned/scheme github.com/openshift/client-go/apiserver/clientset/versioned/typed/apiserver/v1 @@ -778,6 +779,9 @@ github.com/openshift/client-go/security/informers/externalversions/internalinter github.com/openshift/client-go/security/informers/externalversions/security github.com/openshift/client-go/security/informers/externalversions/security/v1 github.com/openshift/client-go/security/listers/security/v1 +github.com/openshift/client-go/securityinternal/clientset/versioned +github.com/openshift/client-go/securityinternal/clientset/versioned/scheme +github.com/openshift/client-go/securityinternal/clientset/versioned/typed/securityinternal/v1 
github.com/openshift/client-go/template/clientset/versioned github.com/openshift/client-go/template/clientset/versioned/scheme github.com/openshift/client-go/template/clientset/versioned/typed/template/v1 @@ -794,7 +798,19 @@ github.com/openshift/client-go/user/informers/externalversions/internalinterface github.com/openshift/client-go/user/informers/externalversions/user github.com/openshift/client-go/user/informers/externalversions/user/v1 github.com/openshift/client-go/user/listers/user/v1 -# github.com/openshift/library-go v0.0.0-20210825122301-7f0bf922c345 => github.com/openshift/library-go v0.0.0-20210407092538-7021fda6f427 +# github.com/openshift/cluster-policy-controller v0.0.0-20210723200948-8fbffaf2b3c7 +## explicit +github.com/openshift/cluster-policy-controller/pkg/client/genericinformers +github.com/openshift/cluster-policy-controller/pkg/cmd/cluster-policy-controller +github.com/openshift/cluster-policy-controller/pkg/cmd/controller +github.com/openshift/cluster-policy-controller/pkg/quota/clusterquotareconciliation +github.com/openshift/cluster-policy-controller/pkg/quota/quotaimageexternal +github.com/openshift/cluster-policy-controller/pkg/security/controller +github.com/openshift/cluster-policy-controller/pkg/security/mcs +github.com/openshift/cluster-policy-controller/pkg/security/uidallocator +github.com/openshift/cluster-policy-controller/pkg/version +# github.com/openshift/library-go v0.0.0-20210825122301-7f0bf922c345 => github.com/openshift/library-go v0.0.0-20210720151324-cfbfc8feace0 +## explicit github.com/openshift/library-go/pkg/apiserver/admission/admissionrestconfig github.com/openshift/library-go/pkg/apiserver/admission/admissiontimeout github.com/openshift/library-go/pkg/apiserver/apiserverconfig @@ -814,6 +830,10 @@ github.com/openshift/library-go/pkg/config/helpers github.com/openshift/library-go/pkg/config/leaderelection github.com/openshift/library-go/pkg/config/serving github.com/openshift/library-go/pkg/config/validation +github.com/openshift/library-go/pkg/controller/controllercmd +github.com/openshift/library-go/pkg/controller/factory +github.com/openshift/library-go/pkg/controller/fileobserver +github.com/openshift/library-go/pkg/controller/metrics github.com/openshift/library-go/pkg/crypto github.com/openshift/library-go/pkg/image/imageutil github.com/openshift/library-go/pkg/image/internal/digest @@ -827,6 +847,10 @@ github.com/openshift/library-go/pkg/monitor/health github.com/openshift/library-go/pkg/network github.com/openshift/library-go/pkg/oauth/oauthdiscovery github.com/openshift/library-go/pkg/oauth/oauthserviceaccountclient +github.com/openshift/library-go/pkg/operator/csr +github.com/openshift/library-go/pkg/operator/events +github.com/openshift/library-go/pkg/operator/management +github.com/openshift/library-go/pkg/operator/v1helpers github.com/openshift/library-go/pkg/quota/clusterquotamapping github.com/openshift/library-go/pkg/quota/quotautil github.com/openshift/library-go/pkg/security/ldaputil @@ -1126,7 +1150,7 @@ github.com/soheilhy/cmux # github.com/spf13/afero v1.2.2 => github.com/spf13/afero v1.2.2 github.com/spf13/afero github.com/spf13/afero/mem -# github.com/spf13/cobra v1.1.1 => github.com/spf13/cobra v1.1.1 +# github.com/spf13/cobra v1.1.3 => github.com/spf13/cobra v1.1.1 ## explicit github.com/spf13/cobra # github.com/spf13/pflag v1.0.5 => github.com/spf13/pflag v1.0.5 @@ -1296,7 +1320,7 @@ go.starlark.net/syntax go.uber.org/atomic # go.uber.org/multierr v1.1.1-0.20180122172545-ddea229ff1df => go.uber.org/multierr v1.1.0 
go.uber.org/multierr -# go.uber.org/zap v1.11.0 => go.uber.org/zap v1.10.0 +# go.uber.org/zap v1.13.0 => go.uber.org/zap v1.10.0 go.uber.org/zap go.uber.org/zap/buffer go.uber.org/zap/internal/bufferpool @@ -1562,7 +1586,7 @@ gopkg.in/warnings.v0 gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c => gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c gopkg.in/yaml.v3 -# k8s.io/api v0.21.1 => k8s.io/api v0.21.1 +# k8s.io/api v0.22.1 => k8s.io/api v0.21.1 ## explicit k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 @@ -1610,7 +1634,7 @@ k8s.io/api/scheduling/v1beta1 k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 -# k8s.io/apiextensions-apiserver v0.21.0 => k8s.io/apiextensions-apiserver v0.21.0 +# k8s.io/apiextensions-apiserver v0.22.1 => k8s.io/apiextensions-apiserver v0.21.0 ## explicit k8s.io/apiextensions-apiserver/pkg/apihelpers k8s.io/apiextensions-apiserver/pkg/apis/apiextensions @@ -1651,7 +1675,7 @@ k8s.io/apiextensions-apiserver/pkg/generated/openapi k8s.io/apiextensions-apiserver/pkg/registry/customresource k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor k8s.io/apiextensions-apiserver/pkg/registry/customresourcedefinition -# k8s.io/apimachinery v0.21.1 => k8s.io/apimachinery v0.21.1 +# k8s.io/apimachinery v0.22.1 => k8s.io/apimachinery v0.21.1 ## explicit k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors @@ -1716,7 +1740,7 @@ k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/apiserver v0.21.1 => github.com/openshift/kubernetes-apiserver v0.0.0-20210527175848-55ee66589915 +# k8s.io/apiserver v0.22.1 => github.com/openshift/kubernetes-apiserver v0.0.0-20210527175848-55ee66589915 ## explicit k8s.io/apiserver/pkg/admission k8s.io/apiserver/pkg/admission/configuration @@ -1860,7 +1884,7 @@ k8s.io/apiserver/plugin/pkg/authorizer/webhook k8s.io/cli-runtime/pkg/genericclioptions k8s.io/cli-runtime/pkg/printers k8s.io/cli-runtime/pkg/resource -# k8s.io/client-go v0.21.1 => k8s.io/client-go v0.21.1 +# k8s.io/client-go v0.22.1 => k8s.io/client-go v0.21.1 ## explicit k8s.io/client-go/applyconfigurations/admissionregistration/v1 k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1 @@ -2179,7 +2203,7 @@ k8s.io/cluster-bootstrap/token/jws k8s.io/cluster-bootstrap/token/util k8s.io/cluster-bootstrap/util/secrets k8s.io/cluster-bootstrap/util/tokens -# k8s.io/component-base v0.21.1 => k8s.io/component-base v0.21.0 +# k8s.io/component-base v0.22.1 => k8s.io/component-base v0.21.0 ## explicit k8s.io/component-base/cli/flag k8s.io/component-base/cli/globalflag @@ -2215,7 +2239,7 @@ k8s.io/component-helpers/node/topology k8s.io/component-helpers/scheduling/corev1 k8s.io/component-helpers/scheduling/corev1/nodeaffinity k8s.io/component-helpers/storage/volume -# k8s.io/controller-manager v0.21.0 => k8s.io/controller-manager v0.21.0 +# k8s.io/controller-manager v0.21.2 => k8s.io/controller-manager v0.21.0 ## explicit k8s.io/controller-manager/app k8s.io/controller-manager/config @@ -2243,10 +2267,10 @@ k8s.io/gengo/parser k8s.io/gengo/types # k8s.io/heapster v1.2.0-beta.1 => k8s.io/heapster v1.2.0-beta.1 k8s.io/heapster/metrics/api/v1/types -# k8s.io/klog/v2 v2.8.0 => k8s.io/klog/v2 v2.8.0 +# k8s.io/klog/v2 v2.9.0 => k8s.io/klog/v2 v2.8.0 ## explicit k8s.io/klog/v2 -# k8s.io/kube-aggregator v0.21.0 => k8s.io/kube-aggregator v0.21.0 +# 
k8s.io/kube-aggregator v0.22.1 => k8s.io/kube-aggregator v0.21.0 ## explicit k8s.io/kube-aggregator/pkg/apis/apiregistration k8s.io/kube-aggregator/pkg/apis/apiregistration/install @@ -2334,7 +2358,7 @@ k8s.io/kubelet/pkg/apis/pluginregistration/v1 k8s.io/kubelet/pkg/apis/podresources/v1 k8s.io/kubelet/pkg/apis/podresources/v1alpha1 k8s.io/kubelet/pkg/apis/stats/v1alpha1 -# k8s.io/kubernetes v1.21.1 => github.com/openshift/kubernetes v0.0.0-20210918023457-a620f506e956 +# k8s.io/kubernetes v1.21.2 => github.com/openshift/kubernetes v0.0.0-20210918023457-a620f506e956 ## explicit k8s.io/kubernetes/cmd/kube-apiserver/app k8s.io/kubernetes/cmd/kube-apiserver/app/options @@ -3175,7 +3199,7 @@ k8s.io/legacy-cloud-providers/openstack k8s.io/legacy-cloud-providers/vsphere k8s.io/legacy-cloud-providers/vsphere/vclib k8s.io/legacy-cloud-providers/vsphere/vclib/diskmanagers -# k8s.io/metrics v0.21.0 => k8s.io/metrics v0.21.0 +# k8s.io/metrics v0.21.0 => k8s.io/metrics v0.21.2 k8s.io/metrics/pkg/apis/custom_metrics k8s.io/metrics/pkg/apis/custom_metrics/v1beta1 k8s.io/metrics/pkg/apis/custom_metrics/v1beta2 @@ -3559,7 +3583,7 @@ sigs.k8s.io/yaml # github.com/openshift/apiserver-library-go => github.com/openshift/apiserver-library-go v0.0.0-20210721120111-70ce3cad7d84 # github.com/openshift/build-machinery-go => github.com/openshift/build-machinery-go v0.0.0-20210209125900-0da259a2c359 # github.com/openshift/client-go => github.com/openshift/client-go v0.0.0-20210422153130-25c8450d1535 -# github.com/openshift/library-go => github.com/openshift/library-go v0.0.0-20210407092538-7021fda6f427 +# github.com/openshift/library-go => github.com/openshift/library-go v0.0.0-20210720151324-cfbfc8feace0 # github.com/pascaldekloe/goe => github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c # github.com/pelletier/go-toml => github.com/pelletier/go-toml v1.2.0 # github.com/peterbourgon/diskv => github.com/peterbourgon/diskv v2.0.1+incompatible @@ -3693,7 +3717,7 @@ sigs.k8s.io/yaml # k8s.io/kubelet => k8s.io/kubelet v0.21.0 # k8s.io/kubernetes => github.com/openshift/kubernetes v0.0.0-20210918023457-a620f506e956 # k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.21.0 -# k8s.io/metrics => k8s.io/metrics v0.21.0 +# k8s.io/metrics => k8s.io/metrics v0.21.2 # k8s.io/mount-utils => k8s.io/mount-utils v0.21.0 # k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.21.0 # k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.21.0