From 2c897bab19b753bbd11de33a3d817b558ce18f85 Mon Sep 17 00:00:00 2001 From: Joe Lanford Date: Fri, 28 Apr 2023 17:35:45 -0400 Subject: [PATCH] introduce source union type --- cmd/manager/main.go | 22 +- ...d.operatorframework.io_catalogsources.yaml | 343 +++++++++++++++++- config/rbac/role.yaml | 13 +- .../samples/core_v1beta1_catalogsource.yaml | 14 +- go.mod | 12 +- go.sum | 19 + internal/source/common.go | 9 + internal/source/configmaps.go | 84 +++++ internal/source/git.go | 282 ++++++++++++++ internal/source/http.go | 88 +++++ internal/source/image.go | 272 ++++++++++++++ internal/source/unpacker.go | 127 +++++++ pkg/apis/core/v1beta1/catalogsource_types.go | 123 ++++++- .../core/v1beta1/zz_generated.deepcopy.go | 154 +++++++- .../core/catalogsource_controller.go | 252 ++++--------- 15 files changed, 1573 insertions(+), 241 deletions(-) create mode 100644 internal/source/common.go create mode 100644 internal/source/configmaps.go create mode 100644 internal/source/git.go create mode 100644 internal/source/http.go create mode 100644 internal/source/image.go create mode 100644 internal/source/unpacker.go diff --git a/cmd/manager/main.go b/cmd/manager/main.go index d715166a..fb2be851 100644 --- a/cmd/manager/main.go +++ b/cmd/manager/main.go @@ -21,22 +21,19 @@ import ( "fmt" "os" - // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) - // to ensure that exec-entrypoint and run can make use of them. 
- _ "k8s.io/client-go/plugin/pkg/client/auth" - "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" + _ "k8s.io/client-go/plugin/pkg/client/auth" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/healthz" "sigs.k8s.io/controller-runtime/pkg/log/zap" + "github.com/operator-framework/catalogd/internal/source" "github.com/operator-framework/catalogd/internal/version" "github.com/operator-framework/catalogd/pkg/apis/core/v1beta1" corecontrollers "github.com/operator-framework/catalogd/pkg/controllers/core" "github.com/operator-framework/catalogd/pkg/profile" - //+kubebuilder:scaffold:imports ) var ( @@ -56,7 +53,7 @@ func main() { metricsAddr string enableLeaderElection bool probeAddr string - opmImage string + unpackImage string profiling bool catalogdVersion bool ) @@ -65,7 +62,8 @@ func main() { flag.BoolVar(&enableLeaderElection, "leader-elect", false, "Enable leader election for controller manager. "+ "Enabling this will ensure there is only one active controller manager.") - flag.StringVar(&opmImage, "opm-image", "quay.io/operator-framework/opm:v1.26", "The opm image to use when unpacking catalog images") + // TODO: should we move the unpacker to some common place? Or... hear me out... should catalogd just be a rukpak provisioner? 
+ flag.StringVar(&unpackImage, "unpack-image", "quay.io/operator-framework/rukpak:v0.12.0", "The unpack image to use when unpacking catalog images") opts := zap.Options{ Development: true, } @@ -94,11 +92,15 @@ func main() { os.Exit(1) } + unpacker, err := source.NewDefaultUnpacker(mgr, "catalogd-system", unpackImage) + if err != nil { + setupLog.Error(err, "unable to create unpacker") + os.Exit(1) + } + if err = (&corecontrollers.CatalogSourceReconciler{ Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - Cfg: mgr.GetConfig(), - OpmImage: opmImage, + Unpacker: unpacker, }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "CatalogSource") os.Exit(1) diff --git a/config/crd/bases/catalogd.operatorframework.io_catalogsources.yaml b/config/crd/bases/catalogd.operatorframework.io_catalogsources.yaml index a376a4c4..fa6030be 100644 --- a/config/crd/bases/catalogd.operatorframework.io_catalogsources.yaml +++ b/config/crd/bases/catalogd.operatorframework.io_catalogsources.yaml @@ -34,19 +34,172 @@ spec: spec: description: CatalogSourceSpec defines the desired state of CatalogSource properties: - image: - description: Image is the Catalog image that contains Operators' metadata - in the FBC format https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs - type: string - pollingInterval: - description: PollingInterval is used to determine the time interval - between checks of the latest index image version. The image is polled - to see if a new version of the image is available. If available, - the latest image is pulled and the cache is updated to contain the - new content. - type: string + source: + properties: + configMaps: + description: ConfigMaps is a list of config map references and + their relative directory paths that represent a catalog filesystem. 
+ items: + properties: + configMap: + description: ConfigMap is a reference to a configmap in + the catalogd system namespace + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + x-kubernetes-map-type: atomic + path: + description: Path is the relative directory path within + the catalog where the files from the configmap will be + present when the catalog is unpacked. + type: string + required: + - configMap + type: object + type: array + git: + description: Git is the git repository that backs the content + of this Catalog. + properties: + auth: + description: Auth configures the authorization method if necessary. + properties: + insecureSkipVerify: + description: InsecureSkipVerify controls whether a client + verifies the server's certificate chain and host name. + If InsecureSkipVerify is true, the clone operation will + accept any certificate presented by the server and any + host name in that certificate. In this mode, TLS is + susceptible to machine-in-the-middle attacks unless + custom verification is used. This should be used only + for testing. + type: boolean + secret: + description: Secret contains reference to the secret that + has authorization information and is in the namespace + that the provisioner is deployed. The secret is expected + to contain `data.username` and `data.password` for the + username and password, respectively for http(s) scheme. + Refer to https://kubernetes.io/docs/concepts/configuration/secret/#basic-authentication-secret + For the ssh authorization of the GitSource, the secret + is expected to contain `data.ssh-privatekey` and `data.ssh-knownhosts` + for the ssh privatekey and the host entry in the known_hosts + file respectively. 
Refer to https://kubernetes.io/docs/concepts/configuration/secret/#ssh-authentication-secrets + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + type: object + x-kubernetes-map-type: atomic + type: object + directory: + description: Directory refers to the location of the catalog + within the git repository. Directory is optional and if + not set defaults to ./manifests. + type: string + ref: + description: Ref configures the git source to clone a specific + branch, tag, or commit from the specified repo. Ref is required, + and exactly one field within Ref is required. Setting more + than one field or zero fields will result in an error. + properties: + branch: + description: Branch refers to the branch to checkout from + the repository. The Branch should contain the catalog + manifests in the specified directory. + type: string + commit: + description: Commit refers to the commit to checkout from + the repository. The Commit should contain the catalog + manifests in the specified directory. + type: string + tag: + description: Tag refers to the tag to checkout from the + repository. The Tag should contain the catalog manifests + in the specified directory. + type: string + type: object + repository: + description: Repository is a URL link to the git repository + containing the catalog. Repository is required and the URL + should be parsable by a standard git tool. + type: string + required: + - ref + - repository + type: object + http: + description: HTTP is the remote location that backs the content + of this Catalog. + properties: + auth: + description: Auth configures the authorization method if necessary. + properties: + insecureSkipVerify: + description: InsecureSkipVerify controls whether a client + verifies the server's certificate chain and host name. 
+ If InsecureSkipVerify is true, the clone operation will + accept any certificate presented by the server and any + host name in that certificate. In this mode, TLS is + susceptible to machine-in-the-middle attacks unless + custom verification is used. This should be used only + for testing. + type: boolean + secret: + description: Secret contains reference to the secret that + has authorization information and is in the namespace + that the provisioner is deployed. The secret is expected + to contain `data.username` and `data.password` for the + username and password, respectively for http(s) scheme. + Refer to https://kubernetes.io/docs/concepts/configuration/secret/#basic-authentication-secret + For the ssh authorization of the GitSource, the secret + is expected to contain `data.ssh-privatekey` and `data.ssh-knownhosts` + for the ssh privatekey and the host entry in the known_hosts + file respectively. Refer to https://kubernetes.io/docs/concepts/configuration/secret/#ssh-authentication-secrets + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + type: object + x-kubernetes-map-type: atomic + type: object + url: + description: URL is where the catalog contents is. + type: string + required: + - url + type: object + image: + description: Image is the catalog image that backs the content + of this catalog. + properties: + pullSecret: + description: ImagePullSecretName contains the name of the + image pull secret in the namespace that the provisioner + is deployed. + type: string + ref: + description: Ref contains the reference to a container image + containing Catalog contents. + type: string + required: + - ref + type: object + type: + description: Type defines the kind of Catalog content being sourced. 
+ type: string + required: + - type + type: object required: - - image + - source type: object status: description: CatalogSourceStatus defines the observed state of CatalogSource @@ -121,6 +274,172 @@ spec: - type type: object type: array + phase: + type: string + resolvedSource: + properties: + configMaps: + description: ConfigMaps is a list of config map references and + their relative directory paths that represent a catalog filesystem. + items: + properties: + configMap: + description: ConfigMap is a reference to a configmap in + the catalogd system namespace + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + x-kubernetes-map-type: atomic + path: + description: Path is the relative directory path within + the catalog where the files from the configmap will be + present when the catalog is unpacked. + type: string + required: + - configMap + type: object + type: array + git: + description: Git is the git repository that backs the content + of this Catalog. + properties: + auth: + description: Auth configures the authorization method if necessary. + properties: + insecureSkipVerify: + description: InsecureSkipVerify controls whether a client + verifies the server's certificate chain and host name. + If InsecureSkipVerify is true, the clone operation will + accept any certificate presented by the server and any + host name in that certificate. In this mode, TLS is + susceptible to machine-in-the-middle attacks unless + custom verification is used. This should be used only + for testing. + type: boolean + secret: + description: Secret contains reference to the secret that + has authorization information and is in the namespace + that the provisioner is deployed. 
The secret is expected + to contain `data.username` and `data.password` for the + username and password, respectively for http(s) scheme. + Refer to https://kubernetes.io/docs/concepts/configuration/secret/#basic-authentication-secret + For the ssh authorization of the GitSource, the secret + is expected to contain `data.ssh-privatekey` and `data.ssh-knownhosts` + for the ssh privatekey and the host entry in the known_hosts + file respectively. Refer to https://kubernetes.io/docs/concepts/configuration/secret/#ssh-authentication-secrets + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + type: object + x-kubernetes-map-type: atomic + type: object + directory: + description: Directory refers to the location of the catalog + within the git repository. Directory is optional and if + not set defaults to ./manifests. + type: string + ref: + description: Ref configures the git source to clone a specific + branch, tag, or commit from the specified repo. Ref is required, + and exactly one field within Ref is required. Setting more + than one field or zero fields will result in an error. + properties: + branch: + description: Branch refers to the branch to checkout from + the repository. The Branch should contain the catalog + manifests in the specified directory. + type: string + commit: + description: Commit refers to the commit to checkout from + the repository. The Commit should contain the catalog + manifests in the specified directory. + type: string + tag: + description: Tag refers to the tag to checkout from the + repository. The Tag should contain the catalog manifests + in the specified directory. + type: string + type: object + repository: + description: Repository is a URL link to the git repository + containing the catalog. 
Repository is required and the URL + should be parsable by a standard git tool. + type: string + required: + - ref + - repository + type: object + http: + description: HTTP is the remote location that backs the content + of this Catalog. + properties: + auth: + description: Auth configures the authorization method if necessary. + properties: + insecureSkipVerify: + description: InsecureSkipVerify controls whether a client + verifies the server's certificate chain and host name. + If InsecureSkipVerify is true, the clone operation will + accept any certificate presented by the server and any + host name in that certificate. In this mode, TLS is + susceptible to machine-in-the-middle attacks unless + custom verification is used. This should be used only + for testing. + type: boolean + secret: + description: Secret contains reference to the secret that + has authorization information and is in the namespace + that the provisioner is deployed. The secret is expected + to contain `data.username` and `data.password` for the + username and password, respectively for http(s) scheme. + Refer to https://kubernetes.io/docs/concepts/configuration/secret/#basic-authentication-secret + For the ssh authorization of the GitSource, the secret + is expected to contain `data.ssh-privatekey` and `data.ssh-knownhosts` + for the ssh privatekey and the host entry in the known_hosts + file respectively. Refer to https://kubernetes.io/docs/concepts/configuration/secret/#ssh-authentication-secrets + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + type: object + x-kubernetes-map-type: atomic + type: object + url: + description: URL is where the catalog contents is. + type: string + required: + - url + type: object + image: + description: Image is the catalog image that backs the content + of this catalog. 
+ properties: + pullSecret: + description: ImagePullSecretName contains the name of the + image pull secret in the namespace that the provisioner + is deployed. + type: string + ref: + description: Ref contains the reference to a container image + containing Catalog contents. + type: string + required: + - ref + type: object + type: + description: Type defines the kind of Catalog content being sourced. + type: string + required: + - type + type: object type: object type: object served: true diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 5383b7d5..6d33bfee 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -4,15 +4,6 @@ kind: ClusterRole metadata: name: manager-role rules: -- apiGroups: - - batch - resources: - - jobs - verbs: - - create - - get - - list - - watch - apiGroups: - catalogd.operatorframework.io resources: @@ -96,8 +87,12 @@ rules: resources: - pods verbs: + - create + - delete - get - list + - patch + - update - watch - apiGroups: - "" diff --git a/config/samples/core_v1beta1_catalogsource.yaml b/config/samples/core_v1beta1_catalogsource.yaml index b34f3750..d2795ad0 100644 --- a/config/samples/core_v1beta1_catalogsource.yaml +++ b/config/samples/core_v1beta1_catalogsource.yaml @@ -1,13 +1,9 @@ apiVersion: catalogd.operatorframework.io/v1beta1 kind: CatalogSource metadata: - labels: - app.kubernetes.io/name: catalogsource - app.kubernetes.io/instance: catalogsource-sample - app.kubernetes.io/part-of: catalogd - app.kuberentes.io/managed-by: kustomize - app.kubernetes.io/created-by: catalogd - name: catalogsource-sample + name: operatorhubio spec: - image: quay.io/operatorhubio/catalog:latest - pollingInterval: 45m + source: + type: image + image: + ref: quay.io/operatorhubio/catalog:latest diff --git a/go.mod b/go.mod index 83a1a807..c08dd0b2 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,11 @@ module github.com/operator-framework/catalogd go 1.19 require ( + github.com/go-git/go-billy/v5 v5.1.0 + 
github.com/go-git/go-git/v5 v5.3.0 + github.com/nlepage/go-tarfs v1.1.0 github.com/operator-framework/operator-registry v1.26.3 + golang.org/x/crypto v0.1.0 k8s.io/api v0.26.0 k8s.io/apimachinery v0.26.0 k8s.io/client-go v0.26.0 @@ -11,16 +15,16 @@ require ( ) require ( + github.com/Microsoft/go-winio v0.4.17 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/emicklei/go-restful/v3 v3.9.0 // indirect + github.com/emirpasic/gods v1.12.0 // indirect github.com/evanphx/json-patch/v5 v5.6.0 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/go-git/gcfg v1.5.0 // indirect - github.com/go-git/go-billy/v5 v5.1.0 // indirect - github.com/go-git/go-git/v5 v5.3.0 // indirect github.com/go-logr/logr v1.2.3 // indirect github.com/go-logr/zapr v1.2.3 // indirect github.com/go-openapi/jsonpointer v0.19.5 // indirect @@ -40,8 +44,10 @@ require ( github.com/joelanford/ignore v0.0.0-20210607151042-0d25dc18b62d // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect + github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351 // indirect github.com/mailru/easyjson v0.7.6 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.2 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect @@ -51,7 +57,9 @@ require ( github.com/prometheus/client_model v0.3.0 // indirect github.com/prometheus/common v0.37.0 // indirect github.com/prometheus/procfs v0.8.0 // indirect + github.com/sergi/go-diff v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect + github.com/xanzy/ssh-agent v0.3.0 // indirect go.uber.org/atomic v1.7.0 // indirect 
go.uber.org/multierr v1.6.0 // indirect go.uber.org/zap v1.24.0 // indirect diff --git a/go.sum b/go.sum index 1618dd9a..04e24473 100644 --- a/go.sum +++ b/go.sum @@ -35,13 +35,18 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= +github.com/Microsoft/go-winio v0.4.17 h1:iT12IBVClFevaf8PuVyi3UmZOVh4OqnaLxDTW2O6j3w= +github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 h1:uSoVVbwJiQipAclBbw+8quDsfcvFjOpI5iCf4p/cqCs= github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod 
h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= @@ -67,6 +72,7 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg= github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -79,12 +85,14 @@ github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2Vvl github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/gliderlabs/ssh v0.2.2 h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0= github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4= github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E= github.com/go-git/go-billy/v5 v5.0.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= github.com/go-git/go-billy/v5 v5.1.0 h1:4pl5BV4o7ZG/lterP4S6WzJ6xr49Ba5ET9ygheTYahk= github.com/go-git/go-billy/v5 v5.1.0/go.mod 
h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= +github.com/go-git/go-git-fixtures/v4 v4.0.2-0.20200613231340-f56387b50c12 h1:PbKy9zOy4aAKrJ5pibIRpVO2BXnK1Tlcg+caKI7Ox5M= github.com/go-git/go-git-fixtures/v4 v4.0.2-0.20200613231340-f56387b50c12/go.mod h1:m+ICp2rF3jDhFgEZ/8yziagdT1C+ZpZcrJjappBCDSw= github.com/go-git/go-git/v5 v5.3.0 h1:8WKMtJR2j8RntEXR/uvTKagfEt4GYlwQ7mntE4+0GWc= github.com/go-git/go-git/v5 v5.3.0/go.mod h1:xdX4bWJ48aOrdhnl2XqHYstHbbp6+LFS4r4X+lNVprw= @@ -206,6 +214,7 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1 github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351 h1:DowS9hvgyYSX4TO5NpyC606/Z4SxnNYbT+WX27or6Ck= github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= @@ -227,6 +236,7 @@ github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJ github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.2 h1:hAHbPm5IJGijwng3PWk09JkG9WeqChjprR5s9bBZ+OM= github.com/matttproud/golang_protobuf_extensions v1.0.2/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod 
h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= @@ -240,6 +250,8 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nlepage/go-tarfs v1.1.0 h1:bsACOiZMB/zFjYG/sE01070i9Fl26MnRpw0L6WuyfVs= +github.com/nlepage/go-tarfs v1.1.0/go.mod h1:IhxRcLhLkawBetnwu/JNuoPkq/6cclAllhgEa6SmzS8= github.com/onsi/ginkgo/v2 v2.6.0 h1:9t9b9vRUbFq3C4qKFCGkVuq/fIHji802N1nrtkh1mNc= github.com/onsi/gomega v1.24.1 h1:KORJXNNTzJXzu4ScJWssJfJMnJ+2QJqhoQSRwNlze9E= github.com/operator-framework/api v0.17.2-0.20220915200120-ff2dbc53d381 h1:/XHgTzfI0O/RP3I6WF0BiPLVuVkfgVyiw04b0MyCJ2M= @@ -279,11 +291,13 @@ github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1 github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= 
+github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= @@ -295,7 +309,9 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= +github.com/xanzy/ssh-agent v0.3.0 h1:wUMzuKtKilRgBAD1sUb8gOwwRr2FGoBVumcjoOACClI= github.com/xanzy/ssh-agent v0.3.0/go.mod h1:3s9xbODqPuuhK9JV1R321M/FlMZSBvE5aY6eAcqrDh0= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -323,6 +339,8 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU= +golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod 
h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -422,6 +440,7 @@ golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/internal/source/common.go b/internal/source/common.go new file mode 100644 index 00000000..32301a40 --- /dev/null +++ b/internal/source/common.go @@ -0,0 +1,9 @@ +package source + +import ( + "fmt" +) + +func generateMessage(catalogName string) string { + return fmt.Sprintf("Successfully unpacked the %s Bundle", catalogName) +} diff --git a/internal/source/configmaps.go b/internal/source/configmaps.go new file mode 100644 index 00000000..4e5d5dc4 --- /dev/null +++ b/internal/source/configmaps.go @@ -0,0 +1,84 @@ +package source + +import ( + "context" + "fmt" + "path/filepath" + "testing/fstest" + + corev1 "k8s.io/api/core/v1" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/sets" + "sigs.k8s.io/controller-runtime/pkg/client" + + catalogdv1beta1 "github.com/operator-framework/catalogd/pkg/apis/core/v1beta1" +) + +type ConfigMaps struct { + Reader client.Reader + ConfigMapNamespace string +} + +func (o *ConfigMaps) Unpack(ctx 
context.Context, catalog *catalogdv1beta1.CatalogSource) (*Result, error) { + if catalog.Spec.Source.Type != catalogdv1beta1.SourceTypeConfigMaps { + return nil, fmt.Errorf("catalog source type %q not supported", catalog.Spec.Source.Type) + } + if catalog.Spec.Source.ConfigMaps == nil { + return nil, fmt.Errorf("catalog source configmaps configuration is unset") + } + + configMapSources := catalog.Spec.Source.ConfigMaps + + catalogFS := fstest.MapFS{} + seenFilepaths := map[string]sets.Set[string]{} + + for _, cmSource := range configMapSources { + cmName := cmSource.ConfigMap.Name + dir := filepath.Clean(cmSource.Path) + + // Validating admission webhook handles validation for: + // - paths outside the catalog root + // - configmaps referenced by catalogs must be immutable + + var cm corev1.ConfigMap + if err := o.Reader.Get(ctx, client.ObjectKey{Name: cmName, Namespace: o.ConfigMapNamespace}, &cm); err != nil { + return nil, fmt.Errorf("get configmap %s/%s: %v", o.ConfigMapNamespace, cmName, err) + } + + addToBundle := func(configMapName, filename string, data []byte) { + filepath := filepath.Join(dir, filename) + if _, ok := seenFilepaths[filepath]; !ok { + seenFilepaths[filepath] = sets.New[string]() + } + seenFilepaths[filepath].Insert(configMapName) + catalogFS[filepath] = &fstest.MapFile{ + Data: data, + } + } + for filename, data := range cm.Data { + addToBundle(cmName, filename, []byte(data)) + } + for filename, data := range cm.BinaryData { + addToBundle(cmName, filename, data) + } + } + + errs := []error{} + for filepath, cmNames := range seenFilepaths { + if len(cmNames) > 1 { + errs = append(errs, fmt.Errorf("duplicate path %q found in configmaps %v", filepath, sets.List(cmNames))) + continue + } + } + if len(errs) > 0 { + return nil, utilerrors.NewAggregate(errs) + } + + resolvedSource := &catalogdv1beta1.CatalogSourceSource{ + Type: catalogdv1beta1.SourceTypeConfigMaps, + ConfigMaps: catalog.Spec.Source.DeepCopy().ConfigMaps, + } + + message := 
generateMessage("configMaps") + return &Result{FS: catalogFS, ResolvedSource: resolvedSource, State: StateUnpacked, Message: message}, nil +} diff --git a/internal/source/git.go b/internal/source/git.go new file mode 100644 index 00000000..e4431279 --- /dev/null +++ b/internal/source/git.go @@ -0,0 +1,282 @@ +package source + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "io/fs" + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/go-git/go-billy/v5" + "github.com/go-git/go-billy/v5/memfs" + "github.com/go-git/go-git/v5" + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/transport" + "github.com/go-git/go-git/v5/plumbing/transport/http" + sshgit "github.com/go-git/go-git/v5/plumbing/transport/ssh" + "github.com/go-git/go-git/v5/storage/memory" + "golang.org/x/crypto/ssh" + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + catalogdv1beta1 "github.com/operator-framework/catalogd/pkg/apis/core/v1beta1" +) + +type Git struct { + client.Reader + SecretNamespace string +} + +func (r *Git) Unpack(ctx context.Context, catalog *catalogdv1beta1.CatalogSource) (*Result, error) { + if catalog.Spec.Source.Type != catalogdv1beta1.SourceTypeGit { + return nil, fmt.Errorf("catalog source type %q not supported", catalog.Spec.Source.Type) + } + if catalog.Spec.Source.Git == nil { + return nil, fmt.Errorf("catalog source git configuration is unset") + } + gitsource := catalog.Spec.Source.Git + if gitsource.Repository == "" { + // This should never happen because the validation webhook rejects git catalogs without repository + return nil, errors.New("missing git source information: repository must be provided") + } + + // Set options for clone + progress := bytes.Buffer{} + cloneOpts := git.CloneOptions{ + URL: gitsource.Repository, + Progress: &progress, + Tags: git.NoTags, + InsecureSkipTLS: catalog.Spec.Source.Git.Auth.InsecureSkipVerify, + } + + if catalog.Spec.Source.Git.Auth.Secret.Name != "" { + 
auth, err := r.configAuth(ctx, catalog) + if err != nil { + return nil, fmt.Errorf("configuring Auth error: %w", err) + } + cloneOpts.Auth = auth + } + + if gitsource.Ref.Branch != "" { + cloneOpts.ReferenceName = plumbing.ReferenceName(fmt.Sprintf("refs/heads/%s", gitsource.Ref.Branch)) + cloneOpts.SingleBranch = true + cloneOpts.Depth = 1 + } else if gitsource.Ref.Tag != "" { + cloneOpts.ReferenceName = plumbing.ReferenceName(fmt.Sprintf("refs/tags/%s", gitsource.Ref.Tag)) + cloneOpts.SingleBranch = true + cloneOpts.Depth = 1 + } + + // Clone + repo, err := git.CloneContext(ctx, memory.NewStorage(), memfs.New(), &cloneOpts) + if err != nil { + return nil, fmt.Errorf("catalog unpack git clone error: %v - %s", err, progress.String()) + } + wt, err := repo.Worktree() + if err != nil { + return nil, fmt.Errorf("catalog unpack error: %v", err) + } + + // Checkout commit + if gitsource.Ref.Commit != "" { + commitHash := plumbing.NewHash(gitsource.Ref.Commit) + if err := wt.Reset(&git.ResetOptions{ + Commit: commitHash, + Mode: git.HardReset, + }); err != nil { + return nil, fmt.Errorf("checkout commit %q: %v", commitHash.String(), err) + } + } + + var catalogFS fs.FS = &billyFS{wt.Filesystem} + + // Subdirectory + if gitsource.Directory != "" { + directory := filepath.Clean(gitsource.Directory) + if directory[:3] == "../" || directory[0] == '/' { + return nil, fmt.Errorf("get subdirectory %q for repository %q: %s", gitsource.Directory, gitsource.Repository, "directory can not start with '../' or '/'") + } + sub, err := wt.Filesystem.Chroot(filepath.Clean(directory)) + if err != nil { + return nil, fmt.Errorf("get subdirectory %q for repository %q: %v", gitsource.Directory, gitsource.Repository, err) + } + catalogFS = &billyFS{sub} + } + + commitHash, err := repo.ResolveRevision("HEAD") + if err != nil { + return nil, fmt.Errorf("resolve commit hash: %v", err) + } + + resolvedGit := catalog.Spec.Source.Git.DeepCopy() + resolvedGit.Ref = catalogdv1beta1.GitRef{ + Commit: 
commitHash.String(), + } + + resolvedSource := &catalogdv1beta1.CatalogSourceSource{ + Type: catalogdv1beta1.SourceTypeGit, + Git: resolvedGit, + } + + message := generateMessage("git") + + return &Result{FS: catalogFS, ResolvedSource: resolvedSource, State: StateUnpacked, Message: message}, nil +} + +func (r *Git) configAuth(ctx context.Context, catalog *catalogdv1beta1.CatalogSource) (transport.AuthMethod, error) { + var auth transport.AuthMethod + if strings.HasPrefix(catalog.Spec.Source.Git.Repository, "http") { + userName, password, err := r.getCredentials(ctx, catalog) + if err != nil { + return nil, err + } + return &http.BasicAuth{Username: userName, Password: password}, nil + } + privatekey, host, err := r.getCertificate(ctx, catalog) + if err != nil { + return nil, err + } + + signer, err := ssh.ParsePrivateKey(privatekey) + if err != nil { + return nil, err + } + auth = &sshgit.PublicKeys{ + User: "git", + Signer: signer, + } + if catalog.Spec.Source.Git.Auth.InsecureSkipVerify { + auth = &sshgit.PublicKeys{ + User: "git", + Signer: signer, + HostKeyCallbackHelper: sshgit.HostKeyCallbackHelper{ + HostKeyCallback: ssh.InsecureIgnoreHostKey(), // nolint:gosec + }, + } + } else if host != nil { + _, _, pubKey, _, _, err := ssh.ParseKnownHosts(host) + if err != nil { + return nil, err + } + auth = &sshgit.PublicKeys{ + User: "git", + Signer: signer, + HostKeyCallbackHelper: sshgit.HostKeyCallbackHelper{ + HostKeyCallback: ssh.FixedHostKey(pubKey), + }, + } + } + return auth, nil +} + +// getCredentials reads credentials from the secret specified in the catalog +// It returns the username and password when they are in the secret +func (r *Git) getCredentials(ctx context.Context, catalog *catalogdv1beta1.CatalogSource) (string, string, error) { + secret := &corev1.Secret{} + err := r.Get(ctx, client.ObjectKey{Namespace: r.SecretNamespace, Name: catalog.Spec.Source.Git.Auth.Secret.Name}, secret) + if err != nil { + return "", "", err + } + userName := 
string(secret.Data["username"]) + password := string(secret.Data["password"]) + + return userName, password, nil +} + +// getCertificate reads certificate from the secret specified in the catalog +// It returns the privatekey and the entry of the host in known_hosts when they are in the secret +func (r *Git) getCertificate(ctx context.Context, catalog *catalogdv1beta1.CatalogSource) ([]byte, []byte, error) { + secret := &corev1.Secret{} + err := r.Get(ctx, client.ObjectKey{Namespace: r.SecretNamespace, Name: catalog.Spec.Source.Git.Auth.Secret.Name}, secret) + if err != nil { + return nil, nil, err + } + return secret.Data["ssh-privatekey"], secret.Data["ssh-knownhosts"], nil +} + +// billy.Filesystem -> fs.FS +var ( + _ fs.FS = &billyFS{} + _ fs.ReadDirFS = &billyFS{} + _ fs.ReadFileFS = &billyFS{} + _ fs.StatFS = &billyFS{} + _ fs.File = &billyFile{} +) + +type billyFS struct { + billy.Filesystem +} + +func (f *billyFS) ReadFile(name string) ([]byte, error) { + file, err := f.Filesystem.Open(name) + if err != nil { + return nil, err + } + return io.ReadAll(file) +} + +func (f *billyFS) Open(path string) (fs.File, error) { + fi, err := f.Filesystem.Stat(path) + if err != nil { + return nil, err + } + if fi.IsDir() { + return &billyDirFile{billyFile{nil, fi}, f, path}, nil + } + file, err := f.Filesystem.Open(path) + if err != nil { + return nil, err + } + return &billyFile{file, fi}, nil +} + +func (f *billyFS) ReadDir(name string) ([]fs.DirEntry, error) { + fis, err := f.Filesystem.ReadDir(name) + if err != nil { + return nil, err + } + entries := make([]fs.DirEntry, 0, len(fis)) + for _, fi := range fis { + entries = append(entries, fs.FileInfoToDirEntry(fi)) + } + return entries, nil +} + +type billyFile struct { + billy.File + fi os.FileInfo +} + +func (b billyFile) Stat() (fs.FileInfo, error) { + return b.fi, nil +} + +func (b billyFile) Close() error { + if b.File == nil { + return nil + } + return b.File.Close() +} + +type billyDirFile struct { + billyFile 
+ fs *billyFS + path string +} + +func (d *billyDirFile) ReadDir(n int) ([]fs.DirEntry, error) { + entries, err := d.fs.ReadDir(d.path) + if n <= 0 || n > len(entries) { + n = len(entries) + } + return entries[:n], err +} + +func (d billyDirFile) Read(data []byte) (int, error) { + return 0, &fs.PathError{Op: "read", Path: d.path, Err: syscall.EISDIR} +} diff --git a/internal/source/http.go b/internal/source/http.go new file mode 100644 index 00000000..21a7eb08 --- /dev/null +++ b/internal/source/http.go @@ -0,0 +1,88 @@ +package source + +import ( + "compress/gzip" + "context" + "crypto/tls" + "fmt" + "net/http" + "time" + + "github.com/nlepage/go-tarfs" + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + catalogdv1beta1 "github.com/operator-framework/catalogd/pkg/apis/core/v1beta1" +) + +// http is a catalog source that sources catalogs from the specified url. +type HTTP struct { + client.Reader + SecretNamespace string +} + +// Unpack unpacks a catalog by requesting the catalog contents from a specified URL +func (b *HTTP) Unpack(ctx context.Context, catalog *catalogdv1beta1.CatalogSource) (*Result, error) { + if catalog.Spec.Source.Type != catalogdv1beta1.SourceTypeHTTP { + return nil, fmt.Errorf("cannot unpack source type %q with %q unpacker", catalog.Spec.Source.Type, catalogdv1beta1.SourceTypeHTTP) + } + + url := catalog.Spec.Source.HTTP.URL + action := fmt.Sprintf("%s %s", http.MethodGet, url) + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return nil, fmt.Errorf("create http request %q for catalog content: %v", action, err) + } + var userName, password string + if catalog.Spec.Source.HTTP.Auth.Secret.Name != "" { + userName, password, err = b.getCredentials(ctx, catalog) + if err != nil { + return nil, err + } + req.SetBasicAuth(userName, password) + } + + httpClient := http.Client{Timeout: 10 * time.Second} + if catalog.Spec.Source.HTTP.Auth.InsecureSkipVerify { + tr := 
http.DefaultTransport.(*http.Transport).Clone() + tr.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} // nolint:gosec + httpClient.Transport = tr + } + + resp, err := httpClient.Do(req) + if err != nil { + return nil, fmt.Errorf("%s: http request for catalog content failed: %v", action, err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("%s: unexpected status %q", action, resp.Status) + } + + tarReader, err := gzip.NewReader(resp.Body) + if err != nil { + return nil, err + } + fs, err := tarfs.New(tarReader) + if err != nil { + return nil, fmt.Errorf("error creating FS: %s", err) + } + + message := generateMessage("http") + + return &Result{FS: fs, ResolvedSource: catalog.Spec.Source.DeepCopy(), State: StateUnpacked, Message: message}, nil +} + +// getCredentials reads credentials from the secret specified in the catalog +// It returns the username and password when they are in the secret +func (b *HTTP) getCredentials(ctx context.Context, catalog *catalogdv1beta1.CatalogSource) (string, string, error) { + secret := &corev1.Secret{} + err := b.Get(ctx, client.ObjectKey{Namespace: b.SecretNamespace, Name: catalog.Spec.Source.HTTP.Auth.Secret.Name}, secret) + if err != nil { + return "", "", err + } + userName := string(secret.Data["username"]) + password := string(secret.Data["password"]) + + return userName, password, nil +} diff --git a/internal/source/image.go b/internal/source/image.go new file mode 100644 index 00000000..5da0bac8 --- /dev/null +++ b/internal/source/image.go @@ -0,0 +1,272 @@ +package source + +import ( + "bytes" + "compress/gzip" + "context" + "encoding/json" + "fmt" + "io" + "io/fs" + "strings" + + "github.com/nlepage/go-tarfs" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + applyconfigurationcorev1 "k8s.io/client-go/applyconfigurations/core/v1" + v1 
"k8s.io/client-go/applyconfigurations/meta/v1" + "k8s.io/client-go/kubernetes" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + catalogdv1beta1 "github.com/operator-framework/catalogd/pkg/apis/core/v1beta1" +) + +type Image struct { + Client client.Client + KubeClient kubernetes.Interface + PodNamespace string + UnpackImage string +} + +const imageBundleUnpackContainerName = "catalog" + +func (i *Image) Unpack(ctx context.Context, catalog *catalogdv1beta1.CatalogSource) (*Result, error) { + if catalog.Spec.Source.Type != catalogdv1beta1.SourceTypeImage { + return nil, fmt.Errorf("catalog source type %q not supported", catalog.Spec.Source.Type) + } + if catalog.Spec.Source.Image == nil { + return nil, fmt.Errorf("catalog source image configuration is unset") + } + + pod := &corev1.Pod{} + op, err := i.ensureUnpackPod(ctx, catalog, pod) + if err != nil { + return nil, err + } else if op == controllerutil.OperationResultCreated || op == controllerutil.OperationResultUpdated || pod.DeletionTimestamp != nil { + return &Result{State: StatePending}, nil + } + + switch phase := pod.Status.Phase; phase { + case corev1.PodPending: + return pendingImagePodResult(pod), nil + case corev1.PodRunning: + return &Result{State: StateUnpacking}, nil + case corev1.PodFailed: + return nil, i.failedPodResult(ctx, pod) + case corev1.PodSucceeded: + return i.succeededPodResult(ctx, pod) + default: + return nil, i.handleUnexpectedPod(ctx, pod) + } +} + +func (i *Image) ensureUnpackPod(ctx context.Context, catalog *catalogdv1beta1.CatalogSource, pod *corev1.Pod) (controllerutil.OperationResult, error) { + existingPod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: i.PodNamespace, Name: catalog.Name}} + if err := i.Client.Get(ctx, client.ObjectKeyFromObject(existingPod), existingPod); client.IgnoreNotFound(err) != nil { + return controllerutil.OperationResultNone, err + } + + podApplyConfig := 
i.getDesiredPodApplyConfig(catalog) + updatedPod, err := i.KubeClient.CoreV1().Pods(i.PodNamespace).Apply(ctx, podApplyConfig, metav1.ApplyOptions{Force: true, FieldManager: "rukpak-core"}) + if err != nil { + if !apierrors.IsInvalid(err) { + return controllerutil.OperationResultNone, err + } + if err := i.Client.Delete(ctx, existingPod); err != nil { + return controllerutil.OperationResultNone, err + } + updatedPod, err = i.KubeClient.CoreV1().Pods(i.PodNamespace).Apply(ctx, podApplyConfig, metav1.ApplyOptions{Force: true, FieldManager: "rukpak-core"}) + if err != nil { + return controllerutil.OperationResultNone, err + } + } + + // make sure the passed in pod value is updated with the latest + // version of the pod + *pod = *updatedPod + + // compare existingPod to newPod and return an appropriate + // OperationResult value. + newPod := updatedPod.DeepCopy() + unsetNonComparedPodFields(existingPod, newPod) + if equality.Semantic.DeepEqual(existingPod, newPod) { + return controllerutil.OperationResultNone, nil + } + return controllerutil.OperationResultUpdated, nil +} + +func (i *Image) getDesiredPodApplyConfig(catalog *catalogdv1beta1.CatalogSource) *applyconfigurationcorev1.PodApplyConfiguration { + // TODO (tyslaton): Address unpacker pod allowing root users for image sources + // + // In our current implementation, we are creating a pod that uses the image + // provided by an image source. This pod is not always guaranteed to run as a + // non-root user and thus will fail to initialize if running as root in a PSA + // restricted namespace due to violations. As it currently stands, our compliance + // with PSA is baseline which allows for pods to run as root users. However, + // all RukPak processes and resources, except this unpacker pod for image sources, + // are runnable in a PSA restricted environment. We should consider ways to make + // this PSA definition either configurable or workable in a restricted namespace. 
+ // + // See https://github.com/operator-framework/rukpak/pull/539 for more detail. + containerSecurityContext := applyconfigurationcorev1.SecurityContext(). + WithAllowPrivilegeEscalation(false). + WithCapabilities(applyconfigurationcorev1.Capabilities(). + WithDrop("ALL"), + ) + + podApply := applyconfigurationcorev1.Pod(catalog.Name, i.PodNamespace). + WithLabels(map[string]string{ + "catalogd.operatorframework.io/owner-kind": catalog.Kind, + "catalogd.operatorframework.io/owner-name": catalog.Name, + }). + WithOwnerReferences(v1.OwnerReference(). + WithName(catalog.Name). + WithKind(catalog.Kind). + WithAPIVersion(catalog.APIVersion). + WithUID(catalog.UID). + WithController(true). + WithBlockOwnerDeletion(true), + ). + WithSpec(applyconfigurationcorev1.PodSpec(). + WithAutomountServiceAccountToken(false). + WithRestartPolicy(corev1.RestartPolicyNever). + WithInitContainers(applyconfigurationcorev1.Container(). + WithName("install-unpack"). + WithImage(i.UnpackImage). + WithImagePullPolicy(corev1.PullIfNotPresent). + WithCommand("cp", "-Rv", "/unpack", "/util/bin/unpack"). + WithVolumeMounts(applyconfigurationcorev1.VolumeMount(). + WithName("util"). + WithMountPath("/util/bin"), + ). + WithSecurityContext(containerSecurityContext), + ). + WithContainers(applyconfigurationcorev1.Container(). + WithName(imageBundleUnpackContainerName). + WithImage(catalog.Spec.Source.Image.Ref). + WithCommand("/util/bin/unpack", "--bundle-dir", "/configs"). + WithVolumeMounts(applyconfigurationcorev1.VolumeMount(). + WithName("util"). + WithMountPath("/util/bin"), + ). + WithSecurityContext(containerSecurityContext), + ). + WithVolumes(applyconfigurationcorev1.Volume(). + WithName("util"). + WithEmptyDir(applyconfigurationcorev1.EmptyDirVolumeSource()), + ). + WithSecurityContext(applyconfigurationcorev1.PodSecurityContext(). + WithRunAsNonRoot(false). + WithSeccompProfile(applyconfigurationcorev1.SeccompProfile(). 
+ WithType(corev1.SeccompProfileTypeRuntimeDefault), + ), + ), + ) + + if catalog.Spec.Source.Image.ImagePullSecretName != "" { + podApply.Spec = podApply.Spec.WithImagePullSecrets( + applyconfigurationcorev1.LocalObjectReference().WithName(catalog.Spec.Source.Image.ImagePullSecretName), + ) + } + return podApply +} + +func unsetNonComparedPodFields(pods ...*corev1.Pod) { + for _, p := range pods { + p.APIVersion = "" + p.Kind = "" + p.Status = corev1.PodStatus{} + } +} + +func (i *Image) failedPodResult(ctx context.Context, pod *corev1.Pod) error { + logs, err := i.getPodLogs(ctx, pod) + if err != nil { + return fmt.Errorf("unpack failed: failed to retrieve failed pod logs: %v", err) + } + _ = i.Client.Delete(ctx, pod) + return fmt.Errorf("unpack failed: %v", string(logs)) +} + +func (i *Image) succeededPodResult(ctx context.Context, pod *corev1.Pod) (*Result, error) { + catalogFS, err := i.getBundleContents(ctx, pod) + if err != nil { + return nil, fmt.Errorf("get catalog contents: %v", err) + } + + digest, err := i.getBundleImageDigest(pod) + if err != nil { + return nil, fmt.Errorf("get catalog image digest: %v", err) + } + + resolvedSource := &catalogdv1beta1.CatalogSourceSource{ + Type: catalogdv1beta1.SourceTypeImage, + Image: &catalogdv1beta1.ImageSource{Ref: digest}, + } + + message := generateMessage("image") + + return &Result{FS: catalogFS, ResolvedSource: resolvedSource, State: StateUnpacked, Message: message}, nil +} + +func (i *Image) getBundleContents(ctx context.Context, pod *corev1.Pod) (fs.FS, error) { + catalogData, err := i.getPodLogs(ctx, pod) + if err != nil { + return nil, fmt.Errorf("get catalog contents: %v", err) + } + bd := struct { + Content []byte `json:"content"` + }{} + + if err := json.Unmarshal(catalogData, &bd); err != nil { + return nil, fmt.Errorf("parse catalog data: %v", err) + } + + gzr, err := gzip.NewReader(bytes.NewReader(bd.Content)) + if err != nil { + return nil, fmt.Errorf("read catalog content gzip: %v", err) + } + 
return tarfs.New(gzr) +} + +func (i *Image) getBundleImageDigest(pod *corev1.Pod) (string, error) { + for _, ps := range pod.Status.ContainerStatuses { + if ps.Name == imageBundleUnpackContainerName && ps.ImageID != "" { + return ps.ImageID, nil + } + } + return "", fmt.Errorf("catalog image digest not found") +} + +func (i *Image) getPodLogs(ctx context.Context, pod *corev1.Pod) ([]byte, error) { + logReader, err := i.KubeClient.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &corev1.PodLogOptions{}).Stream(ctx) + if err != nil { + return nil, fmt.Errorf("get pod logs: %v", err) + } + defer logReader.Close() + buf := &bytes.Buffer{} + if _, err := io.Copy(buf, logReader); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +func (i *Image) handleUnexpectedPod(ctx context.Context, pod *corev1.Pod) error { + _ = i.Client.Delete(ctx, pod) + return fmt.Errorf("unexpected pod phase: %v", pod.Status.Phase) +} + +func pendingImagePodResult(pod *corev1.Pod) *Result { + var messages []string + for _, cStatus := range append(pod.Status.InitContainerStatuses, pod.Status.ContainerStatuses...) { + if waiting := cStatus.State.Waiting; waiting != nil { + if waiting.Reason == "ErrImagePull" || waiting.Reason == "ImagePullBackOff" { + messages = append(messages, waiting.Message) + } + } + } + return &Result{State: StatePending, Message: strings.Join(messages, "; ")} +} diff --git a/internal/source/unpacker.go b/internal/source/unpacker.go new file mode 100644 index 00000000..f61e12df --- /dev/null +++ b/internal/source/unpacker.go @@ -0,0 +1,127 @@ +package source + +import ( + "context" + "fmt" + "io/fs" + + "k8s.io/client-go/kubernetes" + "sigs.k8s.io/controller-runtime/pkg/cluster" + + catalogdv1beta1 "github.com/operator-framework/catalogd/pkg/apis/core/v1beta1" +) + +// TODO: This package is almost entirely copy/pasted from rukpak. We should look +// into whether it is possible to share this code. 
+// +// TODO: None of the rukpak CRD validations (both static and from the rukpak +// webhooks) related to the source are present here. Which of them do we need? + +// Unpacker unpacks catalog content, either synchronously or asynchronously and +// returns a Result, which conveys information about the progress of unpacking +// the catalog content. +// +// If a Source unpacks content asynchronously, it should register one or more +// watches with a controller to ensure that Catalogs referencing this source +// can be reconciled as progress updates are available. +// +// For asynchronous Sources, multiple calls to Unpack should be made until the +// returned result includes state StateUnpacked. +// +// NOTE: A source is meant to be agnostic to specific catalog formats and +// specifications. A source should treat a catalog root directory as an opaque +// file tree and delegate catalog format concerns to catalog parsers. +type Unpacker interface { + Unpack(context.Context, *catalogdv1beta1.CatalogSource) (*Result, error) +} + +// Result conveys progress information about unpacking catalog content. +type Result struct { + // FS contains the full filesystem of a catalog's root directory. + FS fs.FS + + // ResolvedSource is a reproducible view of a Catalog's Source. + // When possible, source implementations should return a ResolvedSource + // that pins the Source such that future fetches of the catalog content can + // be guaranteed to fetch the exact same catalog content as the original + // unpack. + // + // For example, resolved image sources should reference a container image + // digest rather than an image tag, and git sources should reference a + // commit hash rather than a branch or tag. + ResolvedSource *catalogdv1beta1.CatalogSourceSource + + // State is the current state of unpacking the catalog content. + State State + + // Message is contextual information about the progress of unpacking the + // catalog content. 
+ Message string +} + +type State string + +const ( + // StatePending conveys that a request for unpacking a catalog has been + // acknowledged, but not yet started. + StatePending State = "Pending" + + // StateUnpacking conveys that the source is currently unpacking a catalog. + // This state should be used when the catalog contents are being downloaded + // and processed. + StateUnpacking State = "Unpacking" + + // StateUnpacked conveys that the catalog has been successfully unpacked. + StateUnpacked State = "Unpacked" +) + +type unpacker struct { + sources map[catalogdv1beta1.SourceType]Unpacker +} + +// NewUnpacker returns a new composite Source that unpacks catalogs using the source +// mapping provided by the configured sources. +func NewUnpacker(sources map[catalogdv1beta1.SourceType]Unpacker) Unpacker { + return &unpacker{sources: sources} +} + +func (s *unpacker) Unpack(ctx context.Context, catalog *catalogdv1beta1.CatalogSource) (*Result, error) { + source, ok := s.sources[catalog.Spec.Source.Type] + if !ok { + return nil, fmt.Errorf("source type %q not supported", catalog.Spec.Source.Type) + } + return source.Unpack(ctx, catalog) +} + +// NewDefaultUnpacker returns a new composite Source that unpacks catalogs using +// a default source mapping with built-in implementations of all of the supported +// source types. 
+// +// TODO: refactor NewDefaultUnpacker due to growing parameter list +func NewDefaultUnpacker(systemNsCluster cluster.Cluster, namespace, unpackImage string) (Unpacker, error) { + cfg := systemNsCluster.GetConfig() + kubeClient, err := kubernetes.NewForConfig(cfg) + if err != nil { + return nil, err + } + return NewUnpacker(map[catalogdv1beta1.SourceType]Unpacker{ + catalogdv1beta1.SourceTypeImage: &Image{ + Client: systemNsCluster.GetClient(), + KubeClient: kubeClient, + PodNamespace: namespace, + UnpackImage: unpackImage, + }, + catalogdv1beta1.SourceTypeGit: &Git{ + Reader: systemNsCluster.GetClient(), + SecretNamespace: namespace, + }, + catalogdv1beta1.SourceTypeConfigMaps: &ConfigMaps{ + Reader: systemNsCluster.GetClient(), + ConfigMapNamespace: namespace, + }, + catalogdv1beta1.SourceTypeHTTP: &HTTP{ + Reader: systemNsCluster.GetClient(), + SecretNamespace: namespace, + }, + }), nil +} diff --git a/pkg/apis/core/v1beta1/catalogsource_types.go b/pkg/apis/core/v1beta1/catalogsource_types.go index 8b4412f0..2ffe5a42 100644 --- a/pkg/apis/core/v1beta1/catalogsource_types.go +++ b/pkg/apis/core/v1beta1/catalogsource_types.go @@ -17,14 +17,32 @@ limitations under the License. package v1beta1 import ( + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -const ( - TypeReady = "Ready" +// TODO: The source types, reason, etc. are all copy/pasted from the rukpak +// repository. We should look into whether it is possible to share these. 
+ +type SourceType string - ReasonContentsAvailable = "ContentsAvailable" - ReasonUnpackError = "UnpackError" +const ( + SourceTypeImage SourceType = "image" + SourceTypeGit SourceType = "git" + SourceTypeConfigMaps SourceType = "configMaps" + SourceTypeHTTP SourceType = "http" + + TypeUnpacked = "Unpacked" + + ReasonUnpackPending = "UnpackPending" + ReasonUnpacking = "Unpacking" + ReasonUnpackSuccessful = "UnpackSuccessful" + ReasonUnpackFailed = "UnpackFailed" + + PhasePending = "Pending" + PhaseUnpacking = "Unpacking" + PhaseFailing = "Failing" + PhaseUnpacked = "Unpacked" ) //+kubebuilder:object:root=true @@ -52,22 +70,99 @@ type CatalogSourceList struct { // CatalogSourceSpec defines the desired state of CatalogSource type CatalogSourceSpec struct { - - // Image is the Catalog image that contains Operators' metadata in the FBC format - // https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs - Image string `json:"image"` - - // PollingInterval is used to determine the time interval between checks of the - // latest index image version. The image is polled to see if a new version of the - // image is available. If available, the latest image is pulled and the cache is - // updated to contain the new content. - PollingInterval *metav1.Duration `json:"pollingInterval,omitempty"` + Source CatalogSourceSource `json:"source"` } // CatalogSourceStatus defines the observed state of CatalogSource type CatalogSourceStatus struct { // Conditions store the status conditions of the CatalogSource instances Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` + + ResolvedSource *CatalogSourceSource `json:"resolvedSource,omitempty"` + Phase string `json:"phase,omitempty"` +} + +type CatalogSourceSource struct { + // Type defines the kind of Catalog content being sourced. 
+ Type SourceType `json:"type"` + // Image is the catalog image that backs the content of this catalog. + Image *ImageSource `json:"image,omitempty"` + // Git is the git repository that backs the content of this Catalog. + Git *GitSource `json:"git,omitempty"` + // ConfigMaps is a list of config map references and their relative + // directory paths that represent a catalog filesystem. + ConfigMaps []ConfigMapSource `json:"configMaps,omitempty"` + // HTTP is the remote location that backs the content of this Catalog. + HTTP *HTTPSource `json:"http,omitempty"` +} + +type ImageSource struct { + // Ref contains the reference to a container image containing Catalog contents. + Ref string `json:"ref"` + // ImagePullSecretName contains the name of the image pull secret in the namespace that the provisioner is deployed. + ImagePullSecretName string `json:"pullSecret,omitempty"` +} + +type GitSource struct { + // Repository is a URL link to the git repository containing the catalog. + // Repository is required and the URL should be parsable by a standard git tool. + Repository string `json:"repository"` + // Directory refers to the location of the catalog within the git repository. + // Directory is optional and if not set defaults to ./manifests. + Directory string `json:"directory,omitempty"` + // Ref configures the git source to clone a specific branch, tag, or commit + // from the specified repo. Ref is required, and exactly one field within Ref + // is required. Setting more than one field or zero fields will result in an + // error. + Ref GitRef `json:"ref"` + // Auth configures the authorization method if necessary. 
+	Auth Authorization `json:"auth,omitempty"`
+}
+
+type ConfigMapSource struct {
+	// ConfigMap is a reference to a configmap in the catalogd system namespace
+	ConfigMap corev1.LocalObjectReference `json:"configMap"`
+	// Path is the relative directory path within the catalog where the files
+	// from the configmap will be present when the catalog is unpacked.
+	Path string `json:"path,omitempty"`
+}
+
+type HTTPSource struct {
+	// URL is where the catalog contents are.
+	URL string `json:"url"`
+	// Auth configures the authorization method if necessary.
+	Auth Authorization `json:"auth,omitempty"`
+}
+
+type ConfigMapRef struct {
+	Name      string `json:"name"`
+	Namespace string `json:"namespace"`
+}
+
+type GitRef struct {
+	// Branch refers to the branch to checkout from the repository.
+	// The Branch should contain the catalog manifests in the specified directory.
+	Branch string `json:"branch,omitempty"`
+	// Tag refers to the tag to checkout from the repository.
+	// The Tag should contain the catalog manifests in the specified directory.
+	Tag string `json:"tag,omitempty"`
+	// Commit refers to the commit to checkout from the repository.
+	// The Commit should contain the catalog manifests in the specified directory.
+	Commit string `json:"commit,omitempty"`
+}
+
+type Authorization struct {
+	// Secret contains a reference to the secret that has authorization information and is in the namespace that the provisioner is deployed.
+	// The secret is expected to contain `data.username` and `data.password` for the username and password, respectively, for the http(s) scheme.
+	// Refer to https://kubernetes.io/docs/concepts/configuration/secret/#basic-authentication-secret
+	// For the ssh authorization of the GitSource, the secret is expected to contain `data.ssh-privatekey` and `data.ssh-knownhosts` for the ssh privatekey and the host entry in the known_hosts file, respectively.
+ // Refer to https://kubernetes.io/docs/concepts/configuration/secret/#ssh-authentication-secrets + Secret corev1.LocalObjectReference `json:"secret,omitempty"` + // InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name. If InsecureSkipVerify + // is true, the clone operation will accept any certificate presented by the server and any host name in that + // certificate. In this mode, TLS is susceptible to machine-in-the-middle attacks unless custom verification is + // used. This should be used only for testing. + InsecureSkipVerify bool `json:"insecureSkipVerify,omitempty"` } func init() { diff --git a/pkg/apis/core/v1beta1/zz_generated.deepcopy.go b/pkg/apis/core/v1beta1/zz_generated.deepcopy.go index 3c9835ee..3c835cbd 100644 --- a/pkg/apis/core/v1beta1/zz_generated.deepcopy.go +++ b/pkg/apis/core/v1beta1/zz_generated.deepcopy.go @@ -26,6 +26,22 @@ import ( "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Authorization) DeepCopyInto(out *Authorization) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Authorization. +func (in *Authorization) DeepCopy() *Authorization { + if in == nil { + return nil + } + out := new(Authorization) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *BundleMetadata) DeepCopyInto(out *BundleMetadata) { *out = *in @@ -187,13 +203,44 @@ func (in *CatalogSourceList) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *CatalogSourceSpec) DeepCopyInto(out *CatalogSourceSpec) { +func (in *CatalogSourceSource) DeepCopyInto(out *CatalogSourceSource) { *out = *in - if in.PollingInterval != nil { - in, out := &in.PollingInterval, &out.PollingInterval - *out = new(v1.Duration) + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(ImageSource) **out = **in } + if in.Git != nil { + in, out := &in.Git, &out.Git + *out = new(GitSource) + **out = **in + } + if in.ConfigMaps != nil { + in, out := &in.ConfigMaps, &out.ConfigMaps + *out = make([]ConfigMapSource, len(*in)) + copy(*out, *in) + } + if in.HTTP != nil { + in, out := &in.HTTP, &out.HTTP + *out = new(HTTPSource) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogSourceSource. +func (in *CatalogSourceSource) DeepCopy() *CatalogSourceSource { + if in == nil { + return nil + } + out := new(CatalogSourceSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CatalogSourceSpec) DeepCopyInto(out *CatalogSourceSpec) { + *out = *in + in.Source.DeepCopyInto(&out.Source) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogSourceSpec. @@ -216,6 +263,11 @@ func (in *CatalogSourceStatus) DeepCopyInto(out *CatalogSourceStatus) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.ResolvedSource != nil { + in, out := &in.ResolvedSource, &out.ResolvedSource + *out = new(CatalogSourceSource) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogSourceStatus. @@ -248,6 +300,85 @@ func (in *ChannelEntry) DeepCopy() *ChannelEntry { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConfigMapRef) DeepCopyInto(out *ConfigMapRef) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapRef. +func (in *ConfigMapRef) DeepCopy() *ConfigMapRef { + if in == nil { + return nil + } + out := new(ConfigMapRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMapSource) DeepCopyInto(out *ConfigMapSource) { + *out = *in + out.ConfigMap = in.ConfigMap +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapSource. +func (in *ConfigMapSource) DeepCopy() *ConfigMapSource { + if in == nil { + return nil + } + out := new(ConfigMapSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitRef) DeepCopyInto(out *GitRef) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRef. +func (in *GitRef) DeepCopy() *GitRef { + if in == nil { + return nil + } + out := new(GitRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitSource) DeepCopyInto(out *GitSource) { + *out = *in + out.Ref = in.Ref + out.Auth = in.Auth +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitSource. +func (in *GitSource) DeepCopy() *GitSource { + if in == nil { + return nil + } + out := new(GitSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPSource) DeepCopyInto(out *HTTPSource) { + *out = *in + out.Auth = in.Auth +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPSource. +func (in *HTTPSource) DeepCopy() *HTTPSource { + if in == nil { + return nil + } + out := new(HTTPSource) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Icon) DeepCopyInto(out *Icon) { *out = *in @@ -263,6 +394,21 @@ func (in *Icon) DeepCopy() *Icon { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageSource) DeepCopyInto(out *ImageSource) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageSource. +func (in *ImageSource) DeepCopy() *ImageSource { + if in == nil { + return nil + } + out := new(ImageSource) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Package) DeepCopyInto(out *Package) { *out = *in diff --git a/pkg/controllers/core/catalogsource_controller.go b/pkg/controllers/core/catalogsource_controller.go index 808e674b..eb56381d 100644 --- a/pkg/controllers/core/catalogsource_controller.go +++ b/pkg/controllers/core/catalogsource_controller.go @@ -17,23 +17,16 @@ limitations under the License. 
package core import ( - "bytes" "context" "fmt" - "io" - "time" "github.com/operator-framework/operator-registry/alpha/declcfg" - batchv1 "k8s.io/api/batch/v1" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/equality" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" apimacherrors "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" @@ -41,15 +34,15 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/predicate" + "github.com/operator-framework/catalogd/internal/source" corev1beta1 "github.com/operator-framework/catalogd/pkg/apis/core/v1beta1" ) // CatalogSourceReconciler reconciles a CatalogSource object type CatalogSourceReconciler struct { client.Client - Scheme *runtime.Scheme - Cfg *rest.Config - OpmImage string + + Unpacker source.Unpacker } //+kubebuilder:rbac:groups=catalogd.operatorframework.io,resources=catalogsources,verbs=get;list;watch;create;update;patch;delete @@ -61,9 +54,8 @@ type CatalogSourceReconciler struct { //+kubebuilder:rbac:groups=catalogd.operatorframework.io,resources=packages,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups=catalogd.operatorframework.io,resources=packages/status,verbs=get;update;patch //+kubebuilder:rbac:groups=catalogd.operatorframework.io,resources=packages/finalizers,verbs=update -//+kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch +//+kubebuilder:rbac:groups=core,resources=pods,verbs=create;update;patch;delete;get;list;watch //+kubebuilder:rbac:groups=core,resources=pods/log,verbs=get;list;watch -//+kubebuilder:rbac:groups=batch,resources=jobs,verbs=create;get;list;watch // Reconcile is part of the main kubernetes reconciliation loop 
which aims to // move the current state of the cluster closer to the desired state. @@ -113,113 +105,91 @@ func (r *CatalogSourceReconciler) SetupWithManager(mgr ctrl.Manager) error { // for https://github.com/operator-framework/catalogd/issues/6. The fix for // #6 should also remove the usage of `builder.WithPredicates(predicate.GenerationChangedPredicate{})` For(&corev1beta1.CatalogSource{}, builder.WithPredicates(predicate.GenerationChangedPredicate{})). + Owns(&corev1.Pod{}). Complete(r) } func (r *CatalogSourceReconciler) reconcile(ctx context.Context, catalogSource *corev1beta1.CatalogSource) (ctrl.Result, error) { - job, err := r.ensureUnpackJob(ctx, catalogSource) + unpackResult, err := r.Unpacker.Unpack(ctx, catalogSource) if err != nil { - updateStatusError(catalogSource, err) - return ctrl.Result{}, fmt.Errorf("ensuring unpack job: %v", err) + return ctrl.Result{}, updateStatusUnpackFailing(&catalogSource.Status, fmt.Errorf("source bundle content: %v", err)) } - complete, err := r.checkUnpackJobComplete(ctx, job) - if err != nil { - updateStatusError(catalogSource, err) - return ctrl.Result{}, fmt.Errorf("ensuring unpack job completed: %v", err) - } - if !complete { - return ctrl.Result{RequeueAfter: 10 * time.Second}, nil - } + switch unpackResult.State { + case source.StatePending: + updateStatusUnpackPending(&catalogSource.Status, unpackResult) + return ctrl.Result{}, nil + case source.StateUnpacking: + updateStatusUnpacking(&catalogSource.Status, unpackResult) + return ctrl.Result{}, nil + case source.StateUnpacked: + // TODO: We should check to see if the unpacked result has the same content + // as the already unpacked content. If it does, we should skip this rest + // of the unpacking steps. 
+ fbc, err := declcfg.LoadFS(unpackResult.FS) + if err != nil { + return ctrl.Result{}, updateStatusUnpackFailing(&catalogSource.Status, fmt.Errorf("load FBC from filesystem: %v", err)) + } - declCfg, err := r.parseUnpackLogs(ctx, job) - if err != nil { - updateStatusError(catalogSource, err) - return ctrl.Result{}, err - } + if err := r.createPackages(ctx, fbc, catalogSource); err != nil { + return ctrl.Result{}, updateStatusUnpackFailing(&catalogSource.Status, fmt.Errorf("create package objects: %v", err)) + } - if err := r.createPackages(ctx, declCfg, catalogSource); err != nil { - updateStatusError(catalogSource, err) - return ctrl.Result{}, err - } + if err := r.createBundleMetadata(ctx, fbc, catalogSource); err != nil { + return ctrl.Result{}, updateStatusUnpackFailing(&catalogSource.Status, fmt.Errorf("create bundle metadata objects: %v", err)) + } - if err := r.createBundleMetadata(ctx, declCfg, catalogSource); err != nil { - updateStatusError(catalogSource, err) - return ctrl.Result{}, err + updateStatusUnpacked(&catalogSource.Status, unpackResult) + return ctrl.Result{}, nil + default: + return ctrl.Result{}, updateStatusUnpackFailing(&catalogSource.Status, fmt.Errorf("unknown unpack state %q: %v", unpackResult.State, err)) } - // update CatalogSource status as "Ready" since at this point - // all catalog content should be available on cluster - updateStatusReady(catalogSource) - return ctrl.Result{}, nil } -// ensureUnpackJob will ensure that an unpack job has been created for the given -// CatalogSource. It will return the unpack job if successful (either the Job already -// exists or one was successfully created) or an error if it is unsuccessful -func (r *CatalogSourceReconciler) ensureUnpackJob(ctx context.Context, catalogSource *corev1beta1.CatalogSource) (*batchv1.Job, error) { - // Create the unpack Job manifest for the given CatalogSource - job := r.unpackJob(catalogSource) - - // If the Job already exists just return it. 
If it doesn't then attempt to create it - err := r.Client.Get(ctx, client.ObjectKeyFromObject(job), job) - if err != nil { - if errors.IsNotFound(err) { - if err = r.createUnpackJob(ctx, catalogSource); err != nil { - return nil, err - } - return job, nil - } - return nil, err - } - - return job, nil +func updateStatusUnpackPending(status *corev1beta1.CatalogSourceStatus, result *source.Result) { + status.ResolvedSource = nil + status.Phase = corev1beta1.PhasePending + meta.SetStatusCondition(&status.Conditions, metav1.Condition{ + Type: corev1beta1.TypeUnpacked, + Status: metav1.ConditionFalse, + Reason: corev1beta1.ReasonUnpackPending, + Message: result.Message, + }) } -// checkUnpackJobComplete will check whether or not an unpack Job has completed. -// It will return a boolean that is true if the Job has successfully completed, -// false if the Job has not completed, or an error if the Job is completed but in a -// "Failed", "FailureTarget", or "Suspended" state or an error is encountered -// when attempting to check the status of the Job -func (r *CatalogSourceReconciler) checkUnpackJobComplete(ctx context.Context, job *batchv1.Job) (bool, error) { - // If the completion time is non-nil that means the Job has completed - if job.Status.CompletionTime != nil { - // Loop through the conditions and check for any fail conditions - for _, cond := range job.Status.Conditions { - if cond.Status == v1.ConditionTrue && cond.Type != batchv1.JobComplete { - return false, fmt.Errorf("unpack job has condition %q with a status of %q", cond.Type, cond.Status) - } - } - // No failures and job has a completion time so job successfully completed - return true, nil - } - return false, nil +func updateStatusUnpacking(status *corev1beta1.CatalogSourceStatus, result *source.Result) { + status.ResolvedSource = nil + status.Phase = corev1beta1.PhaseUnpacking + meta.SetStatusCondition(&status.Conditions, metav1.Condition{ + Type: corev1beta1.TypeUnpacked, + Status: 
metav1.ConditionFalse, + Reason: corev1beta1.ReasonUnpacking, + Message: result.Message, + }) } -// updateStatusReady will update the CatalogSource.Status.Conditions -// to have the "Ready" condition with a status of "True" and a Reason -// of "ContentsAvailable". This function is used to signal that a CatalogSource -// has been successfully unpacked and all catalog contents are available on cluster -func updateStatusReady(catalogSource *corev1beta1.CatalogSource) { - meta.SetStatusCondition(&catalogSource.Status.Conditions, metav1.Condition{ - Type: corev1beta1.TypeReady, - Reason: corev1beta1.ReasonContentsAvailable, +func updateStatusUnpacked(status *corev1beta1.CatalogSourceStatus, result *source.Result) { + status.ResolvedSource = result.ResolvedSource + status.Phase = corev1beta1.PhaseUnpacked + meta.SetStatusCondition(&status.Conditions, metav1.Condition{ + Type: corev1beta1.TypeUnpacked, Status: metav1.ConditionTrue, - Message: "catalog contents have been unpacked and are available on cluster", + Reason: corev1beta1.ReasonUnpackSuccessful, + Message: result.Message, }) } -// updateStatusError will update the CatalogSource.Status.Conditions -// to have the condition Type "Ready" with a Status of "False" and a Reason -// of "UnpackError". 
This function is used to signal that a CatalogSource -// is in an error state and that catalog contents are not available on cluster -func updateStatusError(catalogSource *corev1beta1.CatalogSource, err error) { - meta.SetStatusCondition(&catalogSource.Status.Conditions, metav1.Condition{ - Type: corev1beta1.TypeReady, +func updateStatusUnpackFailing(status *corev1beta1.CatalogSourceStatus, err error) error { + status.ResolvedSource = nil + status.Phase = corev1beta1.PhaseFailing + meta.SetStatusCondition(&status.Conditions, metav1.Condition{ + Type: corev1beta1.TypeUnpacked, Status: metav1.ConditionFalse, - Reason: corev1beta1.ReasonUnpackError, + Reason: corev1beta1.ReasonUnpackFailed, Message: err.Error(), }) + return err } // createBundleMetadata will create a `BundleMetadata` resource for each @@ -259,7 +229,9 @@ func (r *CatalogSourceReconciler) createBundleMetadata(ctx context.Context, decl }) } - ctrlutil.SetOwnerReference(catalogSource, &bundleMeta, r.Scheme) + if err := ctrlutil.SetControllerReference(catalogSource, &bundleMeta, r.Client.Scheme()); err != nil { + return fmt.Errorf("set controller reference on bundlemetadata %q: %v", bundleMeta.Name, err) + } if err := r.Client.Create(ctx, &bundleMeta); err != nil { return fmt.Errorf("creating bundlemetadata %q: %w", bundleMeta.Name, err) @@ -310,7 +282,9 @@ func (r *CatalogSourceReconciler) createPackages(ctx context.Context, declCfg *d } } - ctrlutil.SetOwnerReference(catalogSource, &pack, r.Scheme) + if err := ctrlutil.SetControllerReference(catalogSource, &pack, r.Client.Scheme()); err != nil { + return fmt.Errorf("set controller reference on package %q: %v", pack.Name, err) + } if err := r.Client.Create(ctx, &pack); err != nil { return fmt.Errorf("creating package %q: %w", pack.Name, err) @@ -318,87 +292,3 @@ func (r *CatalogSourceReconciler) createPackages(ctx context.Context, declCfg *d } return nil } - -// createUnpackJob creates an unpack Job for the given CatalogSource -func (r 
*CatalogSourceReconciler) createUnpackJob(ctx context.Context, cs *corev1beta1.CatalogSource) error { - job := r.unpackJob(cs) - - ctrlutil.SetOwnerReference(cs, job, r.Scheme) - - if err := r.Client.Create(ctx, job); err != nil { - return fmt.Errorf("creating unpackJob: %w", err) - } - - return nil -} - -// parseUnpackLogs parses the Pod logs from the Pod created by the -// provided unpack Job into a `declcfg.DeclarativeConfig` object -func (r *CatalogSourceReconciler) parseUnpackLogs(ctx context.Context, job *batchv1.Job) (*declcfg.DeclarativeConfig, error) { - clientset, err := kubernetes.NewForConfig(r.Cfg) - if err != nil { - return nil, fmt.Errorf("creating clientset: %w", err) - } - - podsForJob := &v1.PodList{} - err = r.Client.List(ctx, podsForJob, client.MatchingLabels{"job-name": job.GetName()}) - if err != nil { - return nil, fmt.Errorf("listing pods: %w", err) - } - - if len(podsForJob.Items) <= 0 { - return nil, fmt.Errorf("no pods for job") - } - pod := podsForJob.Items[0] - - // TODO: Should we remove this check since we verify the Job has completed before calling this making this redundant? 
- if pod.Status.Phase != v1.PodSucceeded { - return nil, fmt.Errorf("job pod in phase %q, expected %q", pod.Status.Phase, v1.PodSucceeded) - } - - req := clientset.CoreV1().Pods(job.Namespace).GetLogs(pod.GetName(), &v1.PodLogOptions{}) - podLogs, err := req.Stream(ctx) - if err != nil { - return nil, fmt.Errorf("streaming pod logs: %w", err) - } - defer podLogs.Close() - - logs, err := io.ReadAll(podLogs) - if err != nil { - return nil, fmt.Errorf("reading pod logs: %w", err) - } - - return declcfg.LoadReader(bytes.NewReader(logs)) -} - -// unpackJob creates the manifest for an unpack Job given a CatalogSource -func (r *CatalogSourceReconciler) unpackJob(cs *corev1beta1.CatalogSource) *batchv1.Job { - return &batchv1.Job{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "catalogd-system", - Name: fmt.Sprintf("%s-image-unpack", cs.Name), - }, - Spec: batchv1.JobSpec{ - Template: v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "catalogd-system", - Name: fmt.Sprintf("%s-image-unpack-pod", cs.Name), - }, - Spec: v1.PodSpec{ - RestartPolicy: v1.RestartPolicyOnFailure, - Containers: []v1.Container{ - { - Image: r.OpmImage, - Name: "unpacker", - Command: []string{ - "opm", - "render", - cs.Spec.Image, - }, - }, - }, - }, - }, - }, - } -}